81 #include "llvm/IR/IntrinsicsARM.h" 116 using namespace llvm;
119 #define DEBUG_TYPE "arm-isel" 121 STATISTIC(NumTailCalls,
"Number of tail calls");
122 STATISTIC(NumMovwMovt,
"Number of GAs materialized with movw + movt");
123 STATISTIC(NumLoopByVals,
"Number of loops generated for byval arguments");
125 "Number of constants with their storage promoted into constant pools");
129 cl::desc(
"Enable / disable ARM interworking (for debugging only)"),
134 cl::desc(
"Enable / disable promotion of unnamed_addr constants into " 139 cl::desc(
"Maximum size of constant to promote into a constant pool"),
143 cl::desc(
"Maximum size of ALL constants to promote into a constant pool"),
148 cl::desc(
"Maximum interleave factor for MVE VLDn to generate."),
153 ARM::R0, ARM::R1,
ARM::R2, ARM::R3
156 void ARMTargetLowering::addTypeForNEON(
MVT VT,
MVT PromotedLdStVT,
157 MVT PromotedBitwiseVT) {
158 if (VT != PromotedLdStVT) {
159 setOperationAction(
ISD::LOAD, VT, Promote);
160 AddPromotedToType (
ISD::LOAD, VT, PromotedLdStVT);
163 AddPromotedToType (
ISD::STORE, VT, PromotedLdStVT);
191 setOperationAction(
ISD::SHL, VT, Custom);
192 setOperationAction(
ISD::SRA, VT, Custom);
193 setOperationAction(
ISD::SRL, VT, Custom);
197 if (VT.
isInteger() && VT != PromotedBitwiseVT) {
198 setOperationAction(
ISD::AND, VT, Promote);
199 AddPromotedToType (
ISD::AND, VT, PromotedBitwiseVT);
200 setOperationAction(
ISD::OR, VT, Promote);
201 AddPromotedToType (
ISD::OR, VT, PromotedBitwiseVT);
202 setOperationAction(
ISD::XOR, VT, Promote);
203 AddPromotedToType (
ISD::XOR, VT, PromotedBitwiseVT);
207 setOperationAction(
ISD::SDIV, VT, Expand);
208 setOperationAction(
ISD::UDIV, VT, Expand);
209 setOperationAction(
ISD::FDIV, VT, Expand);
210 setOperationAction(
ISD::SREM, VT, Expand);
211 setOperationAction(
ISD::UREM, VT, Expand);
212 setOperationAction(
ISD::FREM, VT, Expand);
217 setOperationAction(Opcode, VT,
Legal);
220 setOperationAction(Opcode, VT,
Legal);
223 void ARMTargetLowering::addDRTypeForNEON(
MVT VT) {
224 addRegisterClass(VT, &ARM::DPRRegClass);
228 void ARMTargetLowering::addQRTypeForNEON(
MVT VT) {
229 addRegisterClass(VT, &ARM::DPairRegClass);
233 void ARMTargetLowering::setAllExpand(
MVT VT) {
235 setOperationAction(Opc, VT, Expand);
246 void ARMTargetLowering::addAllExtLoads(
const MVT From,
const MVT To,
253 void ARMTargetLowering::addMVEVectorTypes(
bool HasMVEFP) {
256 for (
auto VT : IntTypes) {
257 addRegisterClass(VT, &ARM::MQPRRegClass);
262 setOperationAction(
ISD::SHL, VT, Custom);
263 setOperationAction(
ISD::SRA, VT, Custom);
264 setOperationAction(
ISD::SRL, VT, Custom);
274 setOperationAction(
ISD::CTTZ, VT, Custom);
283 setOperationAction(
ISD::UDIV, VT, Expand);
284 setOperationAction(
ISD::SDIV, VT, Expand);
285 setOperationAction(
ISD::UREM, VT, Expand);
286 setOperationAction(
ISD::SREM, VT, Expand);
306 setIndexedLoadAction(
im, VT,
Legal);
307 setIndexedStoreAction(
im, VT,
Legal);
308 setIndexedMaskedLoadAction(
im, VT,
Legal);
309 setIndexedMaskedStoreAction(
im, VT,
Legal);
314 for (
auto VT : FloatTypes) {
315 addRegisterClass(VT, &ARM::MQPRRegClass);
334 setIndexedLoadAction(
im, VT,
Legal);
335 setIndexedStoreAction(
im, VT,
Legal);
336 setIndexedMaskedLoadAction(
im, VT,
Legal);
337 setIndexedMaskedStoreAction(
im, VT,
Legal);
346 setOperationAction(
ISD::FDIV, VT, Expand);
347 setOperationAction(
ISD::FREM, VT, Expand);
349 setOperationAction(
ISD::FSIN, VT, Expand);
350 setOperationAction(
ISD::FCOS, VT, Expand);
351 setOperationAction(
ISD::FPOW, VT, Expand);
352 setOperationAction(
ISD::FLOG, VT, Expand);
355 setOperationAction(
ISD::FEXP, VT, Expand);
365 for (
auto VT : LongTypes) {
366 addRegisterClass(VT, &ARM::MQPRRegClass);
398 setIndexedLoadAction(
im, VT,
Legal);
399 setIndexedStoreAction(
im, VT,
Legal);
400 setIndexedMaskedLoadAction(
im, VT,
Legal);
401 setIndexedMaskedStoreAction(
im, VT,
Legal);
407 for (
auto VT : pTypes) {
408 addRegisterClass(VT, &ARM::VCCRRegClass);
417 setOperationAction(
ISD::LOAD, VT, Custom);
434 for (
int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
444 static const struct {
446 const char *
const Name;
451 { RTLIB::SUB_F32,
"__subsf3vfp", ISD::SETCC_INVALID },
452 { RTLIB::MUL_F32,
"__mulsf3vfp", ISD::SETCC_INVALID },
453 { RTLIB::DIV_F32,
"__divsf3vfp", ISD::SETCC_INVALID },
456 { RTLIB::ADD_F64,
"__adddf3vfp", ISD::SETCC_INVALID },
457 { RTLIB::SUB_F64,
"__subdf3vfp", ISD::SETCC_INVALID },
458 { RTLIB::MUL_F64,
"__muldf3vfp", ISD::SETCC_INVALID },
459 { RTLIB::DIV_F64,
"__divdf3vfp", ISD::SETCC_INVALID },
463 { RTLIB::UNE_F32,
"__nesf2vfp", ISD::SETNE },
464 { RTLIB::OLT_F32,
"__ltsf2vfp", ISD::SETNE },
465 { RTLIB::OLE_F32,
"__lesf2vfp", ISD::SETNE },
466 { RTLIB::OGE_F32,
"__gesf2vfp", ISD::SETNE },
467 { RTLIB::OGT_F32,
"__gtsf2vfp", ISD::SETNE },
468 { RTLIB::UO_F32,
"__unordsf2vfp", ISD::SETNE },
471 { RTLIB::OEQ_F64,
"__eqdf2vfp", ISD::SETNE },
472 { RTLIB::UNE_F64,
"__nedf2vfp", ISD::SETNE },
473 { RTLIB::OLT_F64,
"__ltdf2vfp", ISD::SETNE },
474 { RTLIB::OLE_F64,
"__ledf2vfp", ISD::SETNE },
475 { RTLIB::OGE_F64,
"__gedf2vfp", ISD::SETNE },
476 { RTLIB::OGT_F64,
"__gtdf2vfp", ISD::SETNE },
477 { RTLIB::UO_F64,
"__unorddf2vfp", ISD::SETNE },
482 { RTLIB::FPTOSINT_F64_I32,
"__fixdfsivfp", ISD::SETCC_INVALID },
483 { RTLIB::FPTOUINT_F64_I32,
"__fixunsdfsivfp", ISD::SETCC_INVALID },
484 { RTLIB::FPTOSINT_F32_I32,
"__fixsfsivfp", ISD::SETCC_INVALID },
485 { RTLIB::FPTOUINT_F32_I32,
"__fixunssfsivfp", ISD::SETCC_INVALID },
488 { RTLIB::FPROUND_F64_F32,
"__truncdfsf2vfp", ISD::SETCC_INVALID },
489 { RTLIB::FPEXT_F32_F64,
"__extendsfdf2vfp", ISD::SETCC_INVALID },
496 { RTLIB::SINTTOFP_I32_F64,
"__floatsidfvfp", ISD::SETCC_INVALID },
497 { RTLIB::UINTTOFP_I32_F64,
"__floatunssidfvfp", ISD::SETCC_INVALID },
498 { RTLIB::SINTTOFP_I32_F32,
"__floatsisfvfp", ISD::SETCC_INVALID },
499 { RTLIB::UINTTOFP_I32_F32,
"__floatunssisfvfp", ISD::SETCC_INVALID },
502 for (
const auto &LC : LibraryCalls) {
519 static const struct {
521 const char *
const Name;
606 for (
const auto &LC : LibraryCalls) {
616 static const struct {
618 const char *
const Name;
621 } MemOpsLibraryCalls[] = {
629 for (
const auto &LC : MemOpsLibraryCalls) {
639 static const struct {
641 const char *
const Name;
645 { RTLIB::FPTOSINT_F64_I64,
"__dtoi64", CallingConv::ARM_AAPCS_VFP },
646 { RTLIB::FPTOUINT_F32_I64,
"__stou64", CallingConv::ARM_AAPCS_VFP },
647 { RTLIB::FPTOUINT_F64_I64,
"__dtou64", CallingConv::ARM_AAPCS_VFP },
648 { RTLIB::SINTTOFP_I64_F32,
"__i64tos", CallingConv::ARM_AAPCS_VFP },
649 { RTLIB::SINTTOFP_I64_F64,
"__i64tod", CallingConv::ARM_AAPCS_VFP },
650 { RTLIB::UINTTOFP_I64_F32,
"__u64tos", CallingConv::ARM_AAPCS_VFP },
651 { RTLIB::UINTTOFP_I64_F64,
"__u64tod", CallingConv::ARM_AAPCS_VFP },
654 for (
const auto &LC : LibraryCalls) {
686 static const struct {
688 const char *
const Name;
692 { RTLIB::FPROUND_F64_F16,
"__aeabi_d2h", CallingConv::ARM_AAPCS },
693 { RTLIB::FPEXT_F16_F32,
"__aeabi_h2f", CallingConv::ARM_AAPCS },
696 for (
const auto &LC : LibraryCalls) {
730 addAllExtLoads(VT, InnerVT,
Expand);
751 if (Subtarget->
hasLOB()) {
1047 if (Subtarget->
hasDSP()) {
1141 HasStandaloneRem =
false;
1146 const char *
const Name;
1148 } LibraryCalls[] = {
1150 { RTLIB::SDIVREM_I16,
"__rt_sdiv", CallingConv::ARM_AAPCS },
1151 { RTLIB::SDIVREM_I32,
"__rt_sdiv", CallingConv::ARM_AAPCS },
1152 { RTLIB::SDIVREM_I64,
"__rt_sdiv64", CallingConv::ARM_AAPCS },
1154 { RTLIB::UDIVREM_I8,
"__rt_udiv", CallingConv::ARM_AAPCS },
1155 { RTLIB::UDIVREM_I16,
"__rt_udiv", CallingConv::ARM_AAPCS },
1156 { RTLIB::UDIVREM_I32,
"__rt_udiv", CallingConv::ARM_AAPCS },
1157 { RTLIB::UDIVREM_I64,
"__rt_udiv64", CallingConv::ARM_AAPCS },
1160 for (
const auto &LC : LibraryCalls) {
1167 const char *
const Name;
1169 } LibraryCalls[] = {
1171 { RTLIB::SDIVREM_I16,
"__aeabi_idivmod", CallingConv::ARM_AAPCS },
1172 { RTLIB::SDIVREM_I32,
"__aeabi_idivmod", CallingConv::ARM_AAPCS },
1173 { RTLIB::SDIVREM_I64,
"__aeabi_ldivmod", CallingConv::ARM_AAPCS },
1175 { RTLIB::UDIVREM_I8,
"__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1176 { RTLIB::UDIVREM_I16,
"__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1177 { RTLIB::UDIVREM_I32,
"__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1178 { RTLIB::UDIVREM_I64,
"__aeabi_uldivmod", CallingConv::ARM_AAPCS },
1181 for (
const auto &LC : LibraryCalls) {
1225 InsertFencesForAtomic =
false;
1239 InsertFencesForAtomic =
true;
1246 InsertFencesForAtomic =
true;
1266 if (!InsertFencesForAtomic) {
1505 std::pair<const TargetRegisterClass *, uint8_t>
1518 RRC = &ARM::DPRRegClass;
1528 RRC = &ARM::DPRRegClass;
1532 RRC = &ARM::DPRRegClass;
1536 RRC = &ARM::DPRRegClass;
1540 return std::make_pair(RRC, Cost);
1735 return &ARM::QQPRRegClass;
1737 return &ARM::QQQQPRRegClass;
1746 unsigned &PrefAlign)
const {
1747 if (!isa<MemIntrinsic>(CI))
1768 for (
unsigned i = 0; i != NumVals; ++i) {
1800 if (
auto Const = dyn_cast<ConstantSDNode>(Op.
getOperand(1)))
1801 return Const->getZExtValue() == 16;
1808 if (
auto Const = dyn_cast<ConstantSDNode>(Op.
getOperand(1)))
1809 return Const->getZExtValue() == 16;
1816 if (
auto Const = dyn_cast<ConstantSDNode>(Op.
getOperand(1)))
1817 return Const->getZExtValue() == 16;
1886 bool isVarArg)
const {
1924 bool isVarArg)
const {
1925 return CCAssignFnForNode(CC,
false, isVarArg);
1929 bool isVarArg)
const {
1930 return CCAssignFnForNode(CC,
true, isVarArg);
1937 bool isVarArg)
const {
1938 switch (getEffectiveCallingConv(CC, isVarArg)) {
1960 SDValue ARMTargetLowering::LowerCallResult(
1972 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
1977 if (i == 0 && isThisReturn) {
1979 "unexpected return calling convention register assignment");
2051 Chain, dl, Arg, PtrOff,
2064 unsigned id = Subtarget->
isLittle() ? 0 : 1;
2101 bool isStructRet = (Outs.
empty()) ?
false : Outs[0].Flags.
isSRet();
2102 bool isThisReturn =
false;
2103 bool PreferIndirect =
false;
2109 if (isa<GlobalAddressSDNode>(Callee)) {
2113 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2118 return isa<Instruction>(U) &&
2119 cast<Instruction>(U)->getParent() == BB;
2125 isTailCall = IsEligibleForTailCallOptimization(
2126 Callee, CallConv, isVarArg, isStructRet,
2131 "site marked musttail");
2145 unsigned NumBytes = CCInfo.getNextStackOffset();
2164 for (
unsigned i = 0, realArgIdx = 0,
e = ArgLocs.
size();
2166 ++i, ++realArgIdx) {
2168 SDValue Arg = OutVals[realArgIdx];
2170 bool isByVal = Flags.
isByVal();
2198 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
2199 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2203 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
2204 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2208 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
2209 dl, DAG, VA, Flags));
2212 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
2213 StackPtr, MemOpChains, Flags);
2219 "unexpected calling convention register assignment");
2221 "unexpected use of 'returned'");
2222 isThisReturn =
true;
2228 }
else if (isByVal) {
2230 unsigned offset = 0;
2234 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2235 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2237 if (CurByValIdx < ByValArgsCount) {
2239 unsigned RegBegin, RegEnd;
2240 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2245 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2252 RegsToPass.
push_back(std::make_pair(j, Load));
2257 offset = RegEnd - RegBegin;
2259 CCInfo.nextInRegsParam();
2275 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2279 }
else if (!isTailCall) {
2282 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2283 dl, DAG, VA, Flags));
2287 if (!MemOpChains.
empty())
2293 for (
unsigned i = 0,
e = RegsToPass.
size(); i !=
e; ++i) {
2295 RegsToPass[i].
second, InFlag);
2302 bool isDirect =
false;
2308 GV =
G->getGlobal();
2312 bool isARMFunc = !Subtarget->
isThumb() || (isStub && !Subtarget->
isMClass());
2313 bool isLocalARMFunc =
false;
2319 "long-calls codegen is not position independent!");
2323 if (isa<GlobalAddressSDNode>(Callee)) {
2336 const char *Sym = S->getSymbol();
2342 ARMPCLabelIndex, 0);
2350 }
else if (isa<GlobalAddressSDNode>(Callee)) {
2351 if (!PreferIndirect) {
2370 "Windows is the only supported COFF target");
2390 const char *Sym = S->getSymbol();
2395 ARMPCLabelIndex, 4);
2411 if ((!isDirect || isARMFunc) && !Subtarget->
hasV5TOps())
2416 if (!isDirect && !Subtarget->
hasV5TOps())
2427 std::vector<SDValue> Ops;
2428 Ops.push_back(Chain);
2429 Ops.push_back(Callee);
2433 for (
unsigned i = 0,
e = RegsToPass.
size(); i !=
e; ++i)
2434 Ops.push_back(DAG.
getRegister(RegsToPass[i].first,
2435 RegsToPass[i].second.getValueType()));
2448 isThisReturn =
false;
2454 assert(Mask &&
"Missing call preserved mask for calling convention");
2459 Ops.push_back(InFlag);
2470 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
2481 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2482 InVals, isThisReturn,
2483 isThisReturn ? OutVals[0] :
SDValue());
2490 void ARMTargetLowering::HandleByVal(
CCState *State,
unsigned &
Size,
2491 unsigned Align)
const {
2499 unsigned AlignInRegs = Align / 4;
2500 unsigned Waste = (
ARM::R4 -
Reg) % AlignInRegs;
2501 for (
unsigned i = 0; i < Waste; ++i)
2514 if (NSAAOffset != 0 && Size > Excess) {
2526 unsigned ByValRegBegin =
Reg;
2527 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4,
ARM::R4);
2531 for (
unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2537 Size = std::max<int>(Size - Excess, 0);
2562 }
else if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2570 SDValue Ptr = Ld->getBasePtr();
2587 bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2589 bool isCalleeStructRet,
bool isCallerStructRet,
2593 const bool isIndirect)
const {
2604 (!isa<GlobalAddressSDNode>(Callee.
getNode()) || isIndirect))
2618 if (isCalleeStructRet || isCallerStructRet)
2645 if (CalleeCC != CallerCC) {
2647 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2660 if (!Outs.
empty()) {
2664 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2672 for (
unsigned i = 0, realArgIdx = 0,
e = ArgLocs.
size();
2674 ++i, ++realArgIdx) {
2677 SDValue Arg = OutVals[realArgIdx];
2688 if (!ArgLocs[++i].isRegLoc())
2691 if (!ArgLocs[++i].isRegLoc())
2693 if (!ArgLocs[++i].isRegLoc())
2718 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2740 if (IntKind ==
"" || IntKind ==
"IRQ" || IntKind ==
"FIQ" ||
2743 else if (IntKind ==
"SWI" || IntKind ==
"UNDEF")
2747 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2774 bool isLittleEndian = Subtarget->
isLittle();
2781 for (
unsigned i = 0, realRVLocIdx = 0;
2783 ++i, ++realRVLocIdx) {
2787 SDValue Arg = OutVals[realRVLocIdx];
2788 bool ReturnF16 =
false;
2831 HalfGPRs.
getValue(isLittleEndian ? 0 : 1),
2837 HalfGPRs.
getValue(isLittleEndian ? 1 : 0),
2852 fmrrd.
getValue(isLittleEndian ? 0 : 1),
2858 fmrrd.
getValue(isLittleEndian ? 1 : 0),
2876 else if (ARM::DPRRegClass.
contains(*I))
2904 bool ARMTargetLowering::isUsedByReturnOnly(
SDNode *
N,
SDValue &Chain)
const {
2928 if (Copies.
size() > 2)
2963 bool HasRet =
false;
2979 bool ARMTargetLowering::mayBeEmittedAsTailCall(
const CallInst *CI)
const {
2997 &&
"LowerWRITE_REGISTER called for non-i64 type argument.");
3035 Twine(AFI->createPICLabelUId())
3039 return LowerGlobalAddress(GA, DAG);
3059 unsigned ARMPCLabelIndex = 0;
3062 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
3065 if (!IsPositionIndependent) {
3068 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
3079 if (!IsPositionIndependent)
3110 ARMTargetLowering::LowerGlobalTLSAddressDarwin(
SDValue Op,
3113 "This function expects a Darwin target");
3118 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
3155 ARMTargetLowering::LowerGlobalTLSAddressWindows(
SDValue Op,
3199 const auto *GA = cast<GlobalAddressSDNode>(
Op);
3215 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
3235 Entry.
Node = Argument;
3237 Args.push_back(Entry);
3245 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
3246 return CallResult.first;
3268 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
3276 PtrVT, dl, Chain, Offset,
3284 PtrVT, dl, Chain, Offset,
3294 PtrVT, dl, Chain, Offset,
3310 return LowerGlobalTLSAddressDarwin(Op, DAG);
3313 return LowerGlobalTLSAddressWindows(Op, DAG);
3322 return LowerToTLSGeneralDynamicModel(GA, DAG);
3325 return LowerToTLSExecModels(GA, DAG, model);
3334 for (
auto *U : V->
users())
3336 while (!Worklist.
empty()) {
3338 if (isa<ConstantExpr>(U)) {
3339 for (
auto *UU : U->users())
3345 if (!
I ||
I->getParent()->getParent() !=
F)
3374 if (!GVar || !GVar->hasInitializer() ||
3375 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3376 !GVar->hasLocalLinkage())
3381 auto *
Init = GVar->getInitializer();
3383 Init->needsRelocation())
3395 unsigned RequiredPadding = 4 - (Size % 4);
3396 bool PaddingPossible =
3397 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3402 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3426 if (RequiredPadding != 4) {
3431 while (RequiredPadding--)
3444 ++NumConstpoolPromoted;
3449 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3450 if (!(GV = GA->getBaseObject()))
3452 if (
const auto *V = dyn_cast<GlobalVariable>(GV))
3453 return V->isConstant();
3454 return isa<Function>(GV);
3462 return LowerGlobalAddressWindows(Op, DAG);
3464 return LowerGlobalAddressELF(Op, DAG);
3466 return LowerGlobalAddressDarwin(Op, DAG);
3474 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3493 }
else if (Subtarget->
isROPI() && IsRO) {
3498 }
else if (Subtarget->
isRWPI() && !IsRO) {
3539 "ROPI/RWPI not currently supported for Darwin");
3542 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3565 "Windows on ARM expects to use movw/movt");
3567 "ROPI/RWPI not currently supported for Windows");
3570 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3574 else if (!TM.shouldAssumeDSOLocal(*GV->
getParent(), GV))
3609 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(
SDValue Op,
3616 SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
3619 cast<ConstantSDNode>(
3625 case Intrinsic::arm_gnu_eabi_mcount: {
3634 assert(Mask &&
"Missing call preserved mask for calling convention");
3646 ARM::tBL_PUSHLR, dl, ResultTys,
3647 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
3648 DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
3652 {ReturnAddress, Callee, RegisterMask, Chain}),
3661 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
3665 case Intrinsic::thread_pointer: {
3669 case Intrinsic::arm_cls: {
3682 case Intrinsic::arm_cls64: {
3712 case Intrinsic::eh_sjlj_lsda: {
3719 unsigned PCAdj = IsPositionIndependent ? (Subtarget->
isThumb() ? 4 : 8) : 0;
3729 if (IsPositionIndependent) {
3735 case Intrinsic::arm_neon_vabs:
3738 case Intrinsic::arm_neon_vmulls:
3739 case Intrinsic::arm_neon_vmullu: {
3740 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3745 case Intrinsic::arm_neon_vminnm:
3746 case Intrinsic::arm_neon_vmaxnm: {
3747 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3752 case Intrinsic::arm_neon_vminu:
3753 case Intrinsic::arm_neon_vmaxu: {
3756 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3761 case Intrinsic::arm_neon_vmins:
3762 case Intrinsic::arm_neon_vmaxs: {
3765 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3770 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3775 case Intrinsic::arm_neon_vtbl1:
3778 case Intrinsic::arm_neon_vtbl2:
3781 case Intrinsic::arm_mve_pred_i2v:
3782 case Intrinsic::arm_mve_pred_v2i:
3801 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3834 unsigned isRead = ~cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue() & 1;
3840 unsigned isData = cast<ConstantSDNode>(Op.
getOperand(4))->getZExtValue();
3843 isRead = ~isRead & 1;
3844 isData = ~isData & 1;
3870 const SDLoc &dl)
const {
3876 RC = &ARM::tGPRRegClass;
3878 RC = &ARM::GPRRegClass;
3913 const Value *OrigArg,
3914 unsigned InRegsParamRecordIdx,
3915 int ArgOffset,
unsigned ArgSize)
const {
3930 unsigned RBegin, REnd;
3940 ArgOffset = -4 * (
ARM::R4 - RBegin);
3950 for (
unsigned Reg = RBegin, i = 0;
Reg < REnd; ++
Reg, ++i) {
3959 if (!MemOps.
empty())
3968 unsigned TotalArgRegsSaveSize,
3969 bool ForceMutable)
const {
3978 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain,
nullptr,
3981 std::max(4U, TotalArgRegsSaveSize));
3985 SDValue ARMTargetLowering::LowerFormalArguments(
4003 unsigned CurArgIdx = 0;
4015 unsigned ArgRegBegin =
ARM::R4;
4016 for (
unsigned i = 0,
e = ArgLocs.size(); i !=
e; ++i) {
4027 unsigned RBegin, REnd;
4029 ArgRegBegin = std::min(ArgRegBegin, RBegin);
4035 int lastInsIndex = -1;
4039 ArgRegBegin = std::min(ArgRegBegin, (
unsigned)GPRArgRegs[RegIdx]);
4042 unsigned TotalArgRegsSaveSize = 4 * (
ARM::R4 - ArgRegBegin);
4046 for (
unsigned i = 0,
e = ArgLocs.size(); i !=
e; ++i) {
4048 if (Ins[VA.
getValNo()].isOrigArg()) {
4049 std::advance(CurOrigArg,
4050 Ins[VA.
getValNo()].getOrigArgIndex() - CurArgIdx);
4051 CurArgIdx = Ins[VA.
getValNo()].getOrigArgIndex();
4061 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
4072 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
4077 ArgValue, ArgValue1,
4080 ArgValue, ArgValue2,
4083 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4089 RC = &ARM::HPRRegClass;
4091 RC = &ARM::SPRRegClass;
4093 RC = &ARM::DPRRegClass;
4095 RC = &ARM::QPRRegClass;
4098 : &ARM::GPRRegClass;
4144 if (index != lastInsIndex)
4153 assert(Ins[index].isOrigArg() &&
4154 "Byval arguments cannot be implicit");
4158 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
4173 lastInsIndex = index;
4180 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
4182 TotalArgRegsSaveSize);
4192 return CFP->getValueAPF().isPosZero();
4198 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
CP->getConstVal()))
4199 return CFP->getValueAPF().isPosZero();
4217 const SDLoc &dl)
const {
4219 unsigned C = RHSC->getZExtValue();
4287 unsigned Mask = cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue();
4288 auto *RHSC = cast<ConstantSDNode>(RHS.
getNode());
4289 uint64_t RHSV = RHSC->getZExtValue();
4290 if (
isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
4292 if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
4307 isa<ConstantSDNode>(RHS) &&
4308 cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
4310 cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue() < 31) {
4312 cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue() + 1;
4358 bool Signaling)
const {
4395 std::pair<SDValue, SDValue>
4462 return std::make_pair(Value, OverflowCmp);
4473 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4482 ARMcc, CCR, OverflowCmp);
4490 SDLoc DL(BoolCarry);
4552 if (!Subtarget->
hasDSP())
4593 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4597 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4612 if (CMOVTrue && CMOVFalse) {
4614 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4618 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4620 False = SelectFalse;
4621 }
else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4632 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4648 bool &swpCmpOps,
bool &swpVselOps) {
4676 swpCmpOps = !swpCmpOps;
4677 swpVselOps = !swpVselOps;
4714 ARMcc, CCR, duplicateCmp(Cmp, DAG));
4741 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4743 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4751 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4753 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4774 uint64_t &K,
bool &usat) {
4781 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4793 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4796 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4799 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4800 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4801 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4802 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4814 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4832 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4838 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4839 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4840 int64_t PosVal =
std::max(Val1, Val2);
4841 int64_t NegVal = std::min(Val1, Val2);
4843 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4844 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4850 else if (NegVal == 0)
4856 K = (uint64_t)PosVal;
4881 SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
4889 SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
4890 V = (KTmp == TrueVal) ? FalseVal : TrueVal;
4891 SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
4895 if (*K != KTmp || V != VTmp)
4906 bool ARMTargetLowering::isUnsupportedFloatingType(
EVT VT)
const {
4922 uint64_t SatConstant;
4965 unsigned Opcode = 0;
4967 if (TVal == ~FVal) {
4969 }
else if (TVal == ~FVal + 1) {
4971 }
else if (TVal + 1 == FVal) {
4973 }
else if (TVal == FVal + 1) {
5005 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5007 return DAG.
getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
5017 if (!RHS.getNode()) {
5047 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5049 if (cast<ConstantSDNode>(ARMcc)->getZExtValue() ==
ARMCC::PL)
5051 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5066 bool swpCmpOps =
false;
5067 bool swpVselOps =
false;
5080 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5082 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5086 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
5087 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
5119 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
5121 Ld->getPointerInfo(), Ld->getAlignment(),
5122 Ld->getMemOperand()->getFlags());
5137 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
5138 SDValue Ptr = Ld->getBasePtr();
5141 Ld->getAlignment(), Ld->getMemOperand()->
getFlags());
5144 unsigned NewAlign =
MinAlign(Ld->getAlignment(), 4);
5148 Ld->getPointerInfo().getWithOffset(4), NewAlign,
5149 Ld->getMemOperand()->getFlags());
5167 bool LHSSeenZero =
false;
5169 bool RHSSeenZero =
false;
5171 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
5187 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5190 Chain, Dest, ARMcc, CCR, Cmp);
5202 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
5230 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
5260 if (!RHS.getNode()) {
5282 std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.
getValue(0), DAG, ARMcc);
5299 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5302 Chain, Dest, ARMcc, CCR, Cmp);
5308 if (
SDValue Result = OptimizeVFPBrcond(Op, DAG))
5316 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5319 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
5358 DAG.
getLoad(PTy, dl, Chain, Addr,
5375 const bool HasFullFP16 =
5404 if (isUnsupportedFloatingType(SrcVal.
getValueType())) {
5414 MakeLibCallOptions CallOptions;
5418 CallOptions, Loc, Chain);
5419 return IsStrict ? DAG.
getMergeValues({Result, Chain}, Loc) : Result;
5447 "Invalid type for custom lowering!");
5449 const bool HasFullFP16 =
5477 return DAG.
getNode(Opc, dl, VT, Op);
5484 if (isUnsupportedFloatingType(VT)) {
5492 MakeLibCallOptions CallOptions;
5509 bool UseNEON = !InGPR && Subtarget->
hasNEON();
5592 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
5594 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5615 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
5626 Register ARMTargetLowering::getRegisterByName(
const char* RegName,
LLT VT,
5629 .Case(
"sp", ARM::SP)
5645 &&
"ExpandREAD_REGISTER called for non-i64 type result.");
5690 NewIndex *= APIntIndex;
5692 if (NewIndex.getBitWidth() > 32)
5722 const bool HasFullFP16 = Subtarget->
hasFullFP16();
5779 ZeroExtend->getValueType(0) !=
MVT::i32)
5782 auto Copy = ZeroExtend->use_begin();
5867 SDValue LoBigShift = DAG.
getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
5875 ? DAG.getNode(Opc, dl, VT, ShOpHi,
5876 DAG.getConstant(VTBits - 1, dl, VT))
5877 : DAG.getConstant(0, dl, VT);
5878 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl,
MVT::i32),
5884 return DAG.getMergeValues(Ops, dl);
5916 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl,
MVT::i32),
5920 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
5923 return DAG.getMergeValues(Ops, dl);
6006 assert(ST->
hasNEON() &&
"Custom ctpop lowering requires NEON.");
6009 "Unexpected type for custom ctpop lowering");
6017 unsigned EltSize = 8;
6042 APInt SplatBits, SplatUndef;
6043 unsigned SplatBitSize;
6046 !BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6048 SplatBitSize > ElementBits)
6059 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6063 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
6074 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6079 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
6080 if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
6111 "unexpected vector shift opcode");
6114 unsigned VShiftOpc =
6125 unsigned VShiftOpc =
6141 "Unknown shift to lower!");
6214 bool Invert =
false;
6222 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->
get();
6230 "No hardware support for integer vector comparison!");
6257 Merged = DAG.
getNOT(dl, Merged, CmpVT);
6267 switch (SetCCOpcode) {
6299 Result = DAG.
getNOT(dl, Result, VT);
6311 Result = DAG.
getNOT(dl, Result, VT);
6317 switch (SetCCOpcode) {
6353 Result = DAG.
getNOT(dl, Result, VT);
6387 Result = DAG.
getNOT(dl, Result, VT);
6427 const SDLoc &dl,
EVT &VT,
bool is128Bits,
6429 unsigned OpCmode, Imm;
6439 switch (SplatBitSize) {
6444 assert((SplatBits & ~0xff) == 0 &&
"one byte splat value is too big");
6453 if ((SplatBits & ~0xff) == 0) {
6459 if ((SplatBits & ~0xff00) == 0) {
6462 Imm = SplatBits >> 8;
6473 if ((SplatBits & ~0xff) == 0) {
6479 if ((SplatBits & ~0xff00) == 0) {
6482 Imm = SplatBits >> 8;
6485 if ((SplatBits & ~0xff0000) == 0) {
6488 Imm = SplatBits >> 16;
6491 if ((SplatBits & ~0xff000000) == 0) {
6494 Imm = SplatBits >> 24;
6501 if ((SplatBits & ~0xffff) == 0 &&
6502 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
6505 Imm = SplatBits >> 8;
6513 if ((SplatBits & ~0xffffff) == 0 &&
6514 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
6517 Imm = SplatBits >> 16;
6532 uint64_t BitMask = 0xff;
6536 for (
int ByteNum = 0; ByteNum < 8; ++ByteNum) {
6537 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
6540 }
else if ((SplatBits & BitMask) != 0) {
6549 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
6579 APInt INTVal = FPVal.bitcastToAPInt();
6603 if (IsDouble && !Subtarget->
hasFP64())
6632 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
6637 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
6691 unsigned ExpectedElt = Imm;
6692 for (
unsigned i = 1; i < NumElts; ++i) {
6696 if (ExpectedElt == NumElts)
6699 if (M[i] < 0)
continue;
6700 if (ExpectedElt != static_cast<unsigned>(M[i]))
6708 bool &ReverseVEXT,
unsigned &Imm) {
6710 ReverseVEXT =
false;
6721 unsigned ExpectedElt = Imm;
6722 for (
unsigned i = 1; i < NumElts; ++i) {
6726 if (ExpectedElt == NumElts * 2) {
6731 if (M[i] < 0)
continue;
6732 if (ExpectedElt != static_cast<unsigned>(M[i]))
6747 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
6748 "Only possible block sizes for VREV are: 16, 32, 64");
6755 unsigned BlockElts = M[0] + 1;
6758 BlockElts = BlockSize / EltSz;
6760 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
6763 for (
unsigned i = 0; i < NumElts; ++i) {
6764 if (M[i] < 0)
continue;
6765 if ((
unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
6781 if (Mask.
size() == Elements * 2)
6782 return Index / Elements;
6783 return Mask[
Index] == 0 ? 0 : 1;
6813 if (M.
size() != NumElts && M.
size() != NumElts*2)
6821 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6823 for (
unsigned j = 0; j < NumElts; j += 2) {
6824 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
6825 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + NumElts + WhichResult))
6830 if (M.
size() == NumElts*2)
6845 if (M.
size() != NumElts && M.
size() != NumElts*2)
6848 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6850 for (
unsigned j = 0; j < NumElts; j += 2) {
6851 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
6852 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + WhichResult))
6857 if (M.
size() == NumElts*2)
6877 if (M.
size() != NumElts && M.
size() != NumElts*2)
6880 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6882 for (
unsigned j = 0; j < NumElts; ++j) {
6883 if (M[i+j] >= 0 && (
unsigned) M[i+j] != 2 * j + WhichResult)
6888 if (M.
size() == NumElts*2)
6907 if (M.
size() != NumElts && M.
size() != NumElts*2)
6910 unsigned Half = NumElts / 2;
6911 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6913 for (
unsigned j = 0; j < NumElts; j += Half) {
6914 unsigned Idx = WhichResult;
6915 for (
unsigned k = 0; k < Half; ++k) {
6916 int MIdx = M[i + j + k];
6917 if (MIdx >= 0 && (
unsigned) MIdx != Idx)
6924 if (M.
size() == NumElts*2)
6948 if (M.
size() != NumElts && M.
size() != NumElts*2)
6951 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6953 unsigned Idx = WhichResult * NumElts / 2;
6954 for (
unsigned j = 0; j < NumElts; j += 2) {
6955 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != Idx) ||
6956 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != Idx + NumElts))
6962 if (M.
size() == NumElts*2)
6981 if (M.
size() != NumElts && M.
size() != NumElts*2)
6984 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6986 unsigned Idx = WhichResult * NumElts / 2;
6987 for (
unsigned j = 0; j < NumElts; j += 2) {
6988 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != Idx) ||
6989 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != Idx))
6995 if (M.
size() == NumElts*2)
7008 unsigned &WhichResult,
7011 if (
isVTRNMask(ShuffleMask, VT, WhichResult))
7013 if (
isVUZPMask(ShuffleMask, VT, WhichResult))
7015 if (
isVZIPMask(ShuffleMask, VT, WhichResult))
7033 if (NumElts != M.
size())
7037 for (
unsigned i = 0; i != NumElts; ++i)
7038 if (M[i] >= 0 && M[i] != (
int) (NumElts - 1 - i))
7056 unsigned Offset = Top ? 0 : 1;
7057 for (
unsigned i = 0; i < NumElts; i+=2) {
7058 if (M[i] >= 0 && M[i] != (
int)i)
7060 if (M[i+1] >= 0 && M[i+1] != (
int)(NumElts + i +
Offset))
7073 if (!isa<ConstantSDNode>(N))
7075 Val = cast<ConstantSDNode>(
N)->getZExtValue();
7078 if (Val <= 255 || ~Val <= 255)
7096 unsigned BitsPerBool;
7100 }
else if (NumElts == 8) {
7103 }
else if (NumElts == 16) {
7112 if (!isa<ConstantSDNode>(FirstOp) &&
7114 [&FirstOp](
SDUse &U) {
7115 return U.
get().
isUndef() || U.get() == FirstOp;
7123 unsigned Bits32 = 0;
7124 for (
unsigned i = 0; i < NumElts; ++i) {
7126 if (!isa<ConstantSDNode>(V) && !V.
isUndef())
7128 bool BitSet = V.
isUndef() ?
false : cast<ConstantSDNode>(V)->getZExtValue();
7130 Bits32 |= BoolMask << (i * BitsPerBool);
7136 for (
unsigned i = 0; i < NumElts; ++i) {
7138 if (isa<ConstantSDNode>(V) || V.
isUndef())
7158 APInt SplatBits, SplatUndef;
7159 unsigned SplatBitSize;
7161 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
7165 if ((ST->
hasNEON() && SplatBitSize <= 64) ||
7180 uint64_t NegatedImm = (~SplatBits).getZExtValue();
7209 bool isOnlyLowElement =
true;
7210 bool usesOnlyOneValue =
true;
7211 bool hasDominantValue =
false;
7218 for (
unsigned i = 0; i < NumElts; ++i) {
7223 isOnlyLowElement =
false;
7224 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
7227 ValueCounts.
insert(std::make_pair(V, 0));
7228 unsigned &Count = ValueCounts[V];
7231 if (++Count > (NumElts / 2)) {
7232 hasDominantValue =
true;
7236 if (ValueCounts.
size() != 1)
7237 usesOnlyOneValue =
false;
7239 Value = ValueCounts.
begin()->first;
7241 if (ValueCounts.
empty())
7253 if (hasDominantValue && EltSize <= 32) {
7282 if (!usesOnlyOneValue) {
7285 for (
unsigned I = 0;
I < NumElts; ++
I) {
7302 for (
unsigned i = 0; i < NumElts; ++i)
7307 Val = LowerBUILD_VECTOR(Val, DAG, ST);
7311 if (usesOnlyOneValue) {
7313 if (isConstant && Val.
getNode())
7326 SDValue shuffle = ReconstructShuffle(Op, DAG);
7340 Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
7342 HVT, dl,
makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
7344 Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
7352 if (EltSize >= 32) {
7358 for (
unsigned i = 0; i < NumElts; ++i)
7370 if (!isConstant && !usesOnlyOneValue) {
7372 for (
unsigned i = 0 ; i < NumElts; ++i) {
7394 struct ShuffleSourceInfo {
7397 unsigned MaxElt = 0;
7407 int WindowScale = 1;
7409 ShuffleSourceInfo(
SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
7417 for (
unsigned i = 0; i < NumElts; ++i) {
7425 }
else if (!isa<ConstantSDNode>(V.
getOperand(1))) {
7438 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
7445 if (Sources.
size() > 2)
7451 for (
auto &
Source : Sources) {
7452 EVT SrcEltTy =
Source.Vec.getValueType().getVectorElementType();
7453 if (SrcEltTy.
bitsLT(SmallestEltTy))
7454 SmallestEltTy = SrcEltTy;
7456 unsigned ResMultiplier =
7464 for (
auto &Src : Sources) {
7465 EVT SrcVT = Src.ShuffleVec.getValueType();
7483 DAG.
getUNDEF(Src.ShuffleVec.getValueType()));
7490 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
7495 if (Src.MinElt >= NumSrcElts) {
7500 Src.WindowBase = -NumSrcElts;
7501 }
else if (Src.MaxElt < NumSrcElts) {
7518 Src.WindowBase = -Src.MinElt;
7525 for (
auto &Src : Sources) {
7527 if (SrcEltTy == SmallestEltTy)
7532 Src.WindowBase *= Src.WindowScale;
7538 assert(Src.ShuffleVec.getValueType() == ShuffleVT););
7549 int EltNo = cast<ConstantSDNode>(Entry.
getOperand(1))->getSExtValue();
7557 int LanesDefined = BitsDefined / BitsPerShuffleLane;
7561 int *LaneMask = &
Mask[i * ResMultiplier];
7563 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
7564 ExtractBase += NumElts * (Src - Sources.begin());
7565 for (
int j = 0; j < LanesDefined; ++j)
7566 LaneMask[j] = ExtractBase + j;
7572 assert(Sources.size() <= 2 &&
"Too many sources!");
7575 for (
unsigned i = 0; i < Sources.size(); ++i)
7576 ShuffleOps[i] = Sources[i].ShuffleVec;
7579 ShuffleOps[1],
Mask, DAG);
7604 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7624 unsigned PFIndexes[4];
7625 for (
unsigned i = 0; i != 4; ++i) {
7629 PFIndexes[i] = M[i];
7633 unsigned PFTableIndex =
7634 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7636 unsigned Cost = (PFEntry >> 30);
7642 bool ReverseVEXT, isV_UNDEF;
7643 unsigned Imm, WhichResult;
7646 if (EltSize >= 32 ||
7653 else if (Subtarget->
hasNEON() &&
7673 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7674 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7675 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
7678 if (LHSID == (1*9+2)*9+3)
return LHS;
7679 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
7716 OpLHS, OpRHS).getValue(OpNum-
OP_VUZPL);
7720 OpLHS, OpRHS).getValue(OpNum-
OP_VZIPL);
7724 OpLHS, OpRHS).getValue(OpNum-
OP_VTRNL);
7738 I = ShuffleMask.
begin(),
E = ShuffleMask.
end();
I !=
E; ++
I)
7756 "Expect an v8i16/v16i8 type");
7761 unsigned ExtractNum = (VT ==
MVT::v16i8) ? 8 : 4;
7821 "No support for vector shuffle of boolean predicates");
7866 "Unexpected vector type");
7868 int QuarterSize = NumElts / 4;
7874 auto getMovIdx = [](
ArrayRef<int> ShuffleMask,
int Start,
int Length) {
7877 for (
int i = 0; i < Length; i++) {
7878 if (ShuffleMask[Start + i] >= 0) {
7879 if (ShuffleMask[Start + i] % Length != i)
7881 MovIdx = ShuffleMask[Start + i] / Length;
7889 for (
int i = 1; i < Length; i++) {
7890 if (ShuffleMask[Start + i] >= 0 &&
7891 (ShuffleMask[Start + i] / Length != MovIdx ||
7892 ShuffleMask[Start + i] % Length != i))
7898 for (
int Part = 0; Part < 4; ++Part) {
7900 int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
7914 if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
7919 if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
7921 for (
int Part = 0; Part < 4; ++Part)
7922 for (
int i = 0; i < QuarterSize; i++)
7924 Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
7929 for (
int Part = 0; Part < 4; ++Part)
7960 if (EltSize <= 32) {
7964 if (Lane == -1) Lane = 0;
7975 bool IsScalarToVector =
true;
7978 IsScalarToVector =
false;
7981 if (IsScalarToVector)
7988 bool ReverseVEXT =
false;
8014 unsigned WhichResult = 0;
8015 bool isV_UNDEF =
false;
8018 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
8022 .getValue(WhichResult);
8057 }) &&
"Unexpected shuffle index into UNDEF operand!");
8060 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
8063 assert((WhichResult == 0) &&
8064 "In-place shuffle of concat can only have one result!");
8077 unsigned PFIndexes[4];
8078 for (
unsigned i = 0; i != 4; ++i) {
8079 if (ShuffleMask[i] < 0)
8082 PFIndexes[i] = ShuffleMask[i];
8086 unsigned PFTableIndex =
8087 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8089 unsigned Cost = (PFEntry >> 30);
8095 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8096 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
8106 if (EltSize >= 32) {
8114 for (
unsigned i = 0; i < NumElts; ++i) {
8115 if (ShuffleMask[i] < 0)
8119 ShuffleMask[i] < (
int)NumElts ? V1 : V2,
8147 "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8151 unsigned Lane = cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue();
8152 unsigned LaneWidth =
8154 unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
8166 if (!isa<ConstantSDNode>(Lane))
8197 IVecIn, IElt, Lane);
8210 "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8214 unsigned Lane = cast<ConstantSDNode>(Op.
getOperand(1))->getZExtValue();
8215 unsigned LaneWidth =
8226 if (!isa<ConstantSDNode>(Lane))
8253 assert(Op1VT == Op2VT &&
"Operand types don't match!");
8255 "Unexpected custom CONCAT_VECTORS lowering");
8257 "CONCAT_VECTORS lowering only supported for MVE");
8274 auto ExractInto = [&DAG, &dl](
SDValue NewV,
SDValue ConVec,
unsigned &j) {
8275 EVT NewVT = NewV.getValueType();
8276 EVT ConcatVT = ConVec.getValueType();
8286 ConVec = ExractInto(NewV1, ConVec, j);
8287 ConVec = ExractInto(NewV2, ConVec, j);
8304 "unexpected CONCAT_VECTORS");
8328 unsigned Index = cast<ConstantSDNode>(
V2)->getZExtValue();
8331 "Unexpected custom EXTRACT_SUBVECTOR lowering");
8333 "EXTRACT_SUBVECTOR lowering only supported for MVE");
8344 for (
unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
8370 unsigned HiElt = 1 - LoElt;
8375 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
8378 if (Hi0->getSExtValue() == Lo0->
getSExtValue() >> 32 &&
8379 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
8382 if (Hi0->isNullValue() && Hi1->isNullValue())
8395 unsigned HalfSize = EltSize / 2;
8397 if (!
isIntN(HalfSize,
C->getSExtValue()))
8400 if (!
isUIntN(HalfSize,
C->getZExtValue()))
8438 switch (OrigSimpleTy) {
8454 unsigned ExtOpcode) {
8506 "Expected extending load");
8512 DAG.
getNode(Opcode,
SDLoc(newLoad),
LD->getValueType(0), newLoad);
8533 unsigned NumElts = VT.getVectorNumElements();
8537 for (
unsigned i = 0; i != NumElts; ++i) {
8574 "unexpected type for custom-lowering ISD::MUL");
8577 unsigned NewOpc = 0;
8581 if (isN0SExt && isN1SExt)
8586 if (isN0ZExt && isN1ZExt)
8588 else if (isN1SExt || isN1ZExt) {
8622 "unexpected types for extended operands to VMULL");
8623 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
8637 return DAG.
getNode(N0->getOpcode(), DL, VT,
8718 "unexpected type for custom-lowering ISD::SDIV");
8755 "unexpected type for custom-lowering ISD::UDIV");
8890 bool ShouldUseSRet = Subtarget->
isAPCS_ABI();
8892 if (ShouldUseSRet) {
8894 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
8895 const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
8896 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign,
false);
8902 Entry.IsSExt =
false;
8903 Entry.IsZExt =
false;
8904 Entry.IsSRet =
true;
8905 Args.push_back(Entry);
8912 Entry.IsSExt =
false;
8913 Entry.IsZExt =
false;
8914 Args.push_back(Entry);
8917 (ArgVT ==
MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
8925 .setCallee(CC, RetTy, Callee, std::move(Args))
8926 .setDiscardResult(ShouldUseSRet);
8927 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
8930 return CallResult.first;
8951 "unexpected type for custom lowering DIV");
8957 const char *
Name =
nullptr;
8959 Name = (VT ==
MVT::i32) ?
"__rt_sdiv" :
"__rt_sdiv64";
8961 Name = (VT ==
MVT::i32) ?
"__rt_udiv" :
"__rt_udiv64";
8967 for (
auto AI : {1, 0}) {
8971 Args.push_back(Arg);
8974 CallLoweringInfo CLI(DAG);
8978 ES, std::move(Args));
8988 ARMTargetLowering::BuildSDIVPow2(
SDNode *N,
const APInt &Divisor,
9007 if (!(MinSize && HasDivide))
9020 if (Divisor.
sgt(128))
9027 bool Signed)
const {
9029 "unexpected type for custom lowering DIV");
9035 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
9051 void ARMTargetLowering::ExpandDIV_Windows(
9058 "unexpected type for custom lowering DIV");
9063 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
9078 "Expected a predicate type!");
9081 "Expected a non-extending load");
9108 "Expected a predicate type!");
9119 for (
unsigned I = 0;
I < MemVT.getVectorNumElements();
I++)
9122 for (
unsigned I = MemVT.getVectorNumElements();
I < 16;
I++)
9211 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
9220 "AtomicCmpSwap on types less than 64 should be legal");
9226 ARM::CMP_SWAP_64,
SDLoc(N),
9256 DAG, LHS.
getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
9257 if (!RHS.getNode()) {
9278 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
9279 SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
9282 Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
9283 Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
9345 return LowerDIV_Windows(Op, DAG,
true);
9349 return LowerDIV_Windows(Op, DAG,
false);
9355 return LowerSignedALUO(Op, DAG);
9358 return LowerUnsignedALUO(Op, DAG);
9375 return LowerDYNAMIC_STACKALLOC(Op, DAG);
9389 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
9391 if (IntNo == Intrinsic::arm_smlald)
9393 else if (IntNo == Intrinsic::arm_smlaldx)
9395 else if (IntNo == Intrinsic::arm_smlsld)
9397 else if (IntNo == Intrinsic::arm_smlsldx)
9440 Res = LowerREM(N, DAG);
9444 Res = LowerDivRem(
SDValue(N, 0), DAG);
9467 lowerABS(N, Results, DAG);
9486 "ROPI/RWPI not currently supported with SjLj");
9496 bool isThumb2 = Subtarget->
isThumb2();
9499 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
9505 : &ARM::GPRRegClass;
9524 BuildMI(*MBB, MI, dl, TII->
get(ARM::t2LDRpci), NewVReg1)
9525 .addConstantPoolIndex(CPI)
9530 BuildMI(*MBB, MI, dl, TII->
get(ARM::t2ORRri), NewVReg2)
9536 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg3)
9539 BuildMI(*MBB, MI, dl, TII->
get(ARM::t2STRi12))
9545 }
else if (isThumb) {
9554 BuildMI(*MBB, MI, dl, TII->
get(ARM::tLDRpci), NewVReg1)
9555 .addConstantPoolIndex(CPI)
9559 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg2)
9564 BuildMI(*MBB, MI, dl, TII->
get(ARM::tMOVi8), NewVReg3)
9569 BuildMI(*MBB, MI, dl, TII->
get(ARM::tORR), NewVReg4)
9575 BuildMI(*MBB, MI, dl, TII->
get(ARM::tADDframe), NewVReg5)
9590 BuildMI(*MBB, MI, dl, TII->
get(ARM::LDRi12), NewVReg1)
9591 .addConstantPoolIndex(CPI)
9596 BuildMI(*MBB, MI, dl, TII->
get(ARM::PICADD), NewVReg2)
9609 void ARMTargetLowering::EmitSjLjDispatchBlock(
MachineInstr &MI,
9619 : &ARM::GPRnopcRegClass;
9624 unsigned MaxCSNum = 0;
9627 if (!BB->isEHPad())
continue;
9632 II = BB->begin(),
IE = BB->end(); II !=
IE; ++II) {
9633 if (!II->isEHLabel())
continue;
9635 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
9640 CSI = CallSiteIdxs.
begin(),
CSE = CallSiteIdxs.
end();
9641 CSI !=
CSE; ++CSI) {
9642 CallSiteNumToLPad[*CSI].push_back(&*BB);
9643 MaxCSNum =
std::max(MaxCSNum, *CSI);
9650 std::vector<MachineBasicBlock*> LPadList;
9652 LPadList.reserve(CallSiteNumToLPad.
size());
9653 for (
unsigned I = 1;
I <= MaxCSNum; ++
I) {
9656 II = MBBList.
begin(),
IE = MBBList.
end(); II !=
IE; ++II) {
9657 LPadList.push_back(*II);
9658 InvokeBBs.
insert((*II)->pred_begin(), (*II)->pred_end());
9662 assert(!LPadList.empty() &&
9663 "No landing pad destinations for the dispatch jump table!");
9677 unsigned trap_opcode;
9679 trap_opcode = ARM::tTRAP;
9696 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
9703 MIB =
BuildMI(DispatchBB, dl, TII->
get(ARM::Int_eh_sjlj_dispatchsetup));
9715 unsigned NumLPads = LPadList.size();
9718 BuildMI(DispatchBB, dl, TII->
get(ARM::t2LDRi12), NewVReg1)
9724 if (NumLPads < 256) {
9725 BuildMI(DispatchBB, dl, TII->
get(ARM::t2CMPri))
9731 BuildMI(DispatchBB, dl, TII->
get(ARM::t2MOVi16), VReg1)
9732 .addImm(NumLPads & 0xFFFF)
9735 unsigned VReg2 = VReg1;
9736 if ((NumLPads & 0xFFFF0000) != 0) {
9738 BuildMI(DispatchBB, dl, TII->
get(ARM::t2MOVTi16), VReg2)
9744 BuildMI(DispatchBB, dl, TII->
get(ARM::t2CMPrr))
9750 BuildMI(DispatchBB, dl, TII->
get(ARM::t2Bcc))
9756 BuildMI(DispContBB, dl, TII->
get(ARM::t2LEApcrelJT), NewVReg3)
9757 .addJumpTableIndex(MJTI)
9761 BuildMI(DispContBB, dl, TII->
get(ARM::t2ADDrs), NewVReg4)
9768 BuildMI(DispContBB, dl, TII->
get(ARM::t2BR_JT))
9772 }
else if (Subtarget->
isThumb()) {
9774 BuildMI(DispatchBB, dl, TII->
get(ARM::tLDRspi), NewVReg1)
9780 if (NumLPads < 256) {
9781 BuildMI(DispatchBB, dl, TII->
get(ARM::tCMPi8))
9797 BuildMI(DispatchBB, dl, TII->
get(ARM::tLDRpci))
9801 BuildMI(DispatchBB, dl, TII->
get(ARM::tCMPr))
9813 BuildMI(DispContBB, dl, TII->
get(ARM::tLSLri), NewVReg2)
9820 BuildMI(DispContBB, dl, TII->
get(ARM::tLEApcrelJT), NewVReg3)
9821 .addJumpTableIndex(MJTI)
9825 BuildMI(DispContBB, dl, TII->
get(ARM::tADDrr), NewVReg4)
9835 BuildMI(DispContBB, dl, TII->
get(ARM::tLDRi), NewVReg5)
9841 unsigned NewVReg6 = NewVReg5;
9842 if (IsPositionIndependent) {
9844 BuildMI(DispContBB, dl, TII->
get(ARM::tADDrr), NewVReg6)
9851 BuildMI(DispContBB, dl, TII->
get(ARM::tBR_JTr))
9856 BuildMI(DispatchBB, dl, TII->
get(ARM::LDRi12), NewVReg1)
9862 if (NumLPads < 256) {
9863 BuildMI(DispatchBB, dl, TII->
get(ARM::CMPri))
9869 BuildMI(DispatchBB, dl, TII->
get(ARM::MOVi16), VReg1)
9870 .addImm(NumLPads & 0xFFFF)
9873 unsigned VReg2 = VReg1;
9874 if ((NumLPads & 0xFFFF0000) != 0) {
9876 BuildMI(DispatchBB, dl, TII->
get(ARM::MOVTi16), VReg2)
9882 BuildMI(DispatchBB, dl, TII->
get(ARM::CMPrr))
9898 BuildMI(DispatchBB, dl, TII->
get(ARM::LDRcp))
9903 BuildMI(DispatchBB, dl, TII->
get(ARM::CMPrr))
9915 BuildMI(DispContBB, dl, TII->
get(ARM::MOVsi), NewVReg3)
9921 BuildMI(DispContBB, dl, TII->
get(ARM::LEApcrelJT), NewVReg4)
9922 .addJumpTableIndex(MJTI)
9928 BuildMI(DispContBB, dl, TII->
get(ARM::LDRrs), NewVReg5)
9935 if (IsPositionIndependent) {
9936 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTadd))
9941 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTr))
9949 for (std::vector<MachineBasicBlock*>::iterator
9950 I = LPadList.begin(),
E = LPadList.end();
I !=
E; ++
I) {
9952 if (SeenMBBs.
insert(CurMBB).second)
9965 while (!Successors.empty()) {
9974 BB->normalizeSuccProbs();
9981 II = BB->rbegin(),
IE = BB->rend(); II !=
IE; ++II) {
9982 if (!II->isCall())
continue;
9986 OI = II->operands_begin(), OE = II->operands_end();
9988 if (!OI->isReg())
continue;
9989 DefRegs[OI->getReg()] =
true;
9994 for (
unsigned i = 0; SavedRegs[i] != 0; ++i) {
9995 unsigned Reg = SavedRegs[i];
9997 !ARM::tGPRRegClass.contains(Reg) &&
9998 !ARM::hGPRRegClass.contains(Reg))
10000 if (Subtarget->
isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
10002 if (!Subtarget->
isThumb() && !ARM::GPRRegClass.contains(Reg))
10016 (*I)->setIsEHPad(
false);
10033 static unsigned getLdOpcode(
unsigned LdSize,
bool IsThumb1,
bool IsThumb2) {
10035 return LdSize == 16 ? ARM::VLD1q32wb_fixed
10036 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
10038 return LdSize == 4 ? ARM::tLDRi
10039 : LdSize == 2 ? ARM::tLDRHi
10040 : LdSize == 1 ? ARM::tLDRBi : 0;
10042 return LdSize == 4 ? ARM::t2LDR_POST
10043 : LdSize == 2 ? ARM::t2LDRH_POST
10044 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
10045 return LdSize == 4 ? ARM::LDR_POST_IMM
10046 : LdSize == 2 ? ARM::LDRH_POST
10047 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
10052 static unsigned getStOpcode(
unsigned StSize,
bool IsThumb1,
bool IsThumb2) {
10054 return StSize == 16 ? ARM::VST1q32wb_fixed
10055 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
10057 return StSize == 4 ? ARM::tSTRi
10058 : StSize == 2 ? ARM::tSTRHi
10059 : StSize == 1 ? ARM::tSTRBi : 0;
10061 return StSize == 4 ? ARM::t2STR_POST
10062 : StSize == 2 ? ARM::t2STRH_POST
10063 : StSize == 1 ? ARM::t2STRB_POST : 0;
10064 return StSize == 4 ? ARM::STR_POST_IMM
10065 : StSize == 2 ? ARM::STRH_POST
10066 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
10073 unsigned LdSize,
unsigned Data,
unsigned AddrIn,
10074 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
10075 unsigned LdOpc =
getLdOpcode(LdSize, IsThumb1, IsThumb2);
10076 assert(LdOpc != 0 &&
"Should have a load opcode");
10083 }
else if (IsThumb1) {
10089 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut)
10094 }
else if (IsThumb2) {
10114 unsigned StSize,
unsigned Data,
unsigned AddrIn,
10115 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
10116 unsigned StOpc =
getStOpcode(StSize, IsThumb1, IsThumb2);
10117 assert(StOpc != 0 &&
"Should have a store opcode");
10119 BuildMI(*BB, Pos, dl, TII->
get(StOpc), AddrOut)
10124 }
else if (IsThumb1) {
10131 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut)
10136 }
else if (IsThumb2) {
10137 BuildMI(*BB, Pos, dl, TII->
get(StOpc), AddrOut)
10143 BuildMI(*BB, Pos, dl, TII->
get(StOpc), AddrOut)
10170 unsigned UnitSize = 0;
10175 bool IsThumb2 = Subtarget->
isThumb2();
10176 bool IsThumb = Subtarget->
isThumb();
10180 }
else if (Align & 2) {
10186 if ((Align % 16 == 0) && SizeVal >= 16)
10188 else if ((Align % 8 == 0) && SizeVal >= 8)
10197 bool IsNeon = UnitSize >= 8;
10198 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
10200 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
10201 : UnitSize == 8 ? &ARM::DPRRegClass
10204 unsigned BytesLeft = SizeVal % UnitSize;
10205 unsigned LoopSize = SizeVal - BytesLeft;
10207 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
10211 unsigned srcIn = src;
10212 unsigned destIn = dest;
10213 for (
unsigned i = 0; i < LoopSize; i+=UnitSize) {
10217 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
10218 IsThumb1, IsThumb2);
10219 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
10220 IsThumb1, IsThumb2);
10228 for (
unsigned i = 0; i < BytesLeft; i++) {
10232 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
10233 IsThumb1, IsThumb2);
10234 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
10235 IsThumb1, IsThumb2);
10265 MF->
insert(It, loopMBB);
10266 MF->
insert(It, exitMBB);
10276 unsigned Vtmp = varEnd;
10277 if ((LoopSize & 0xFFFF0000) != 0)
10279 BuildMI(BB, dl, TII->
get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
10280 .addImm(LoopSize & 0xFFFF)
10283 if ((LoopSize & 0xFFFF0000) != 0)
10284 BuildMI(BB, dl, TII->
get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
10303 BuildMI(*BB, MI, dl, TII->
get(ARM::tLDRpci))
10332 .addReg(varLoop).
addMBB(loopMBB)
10335 .addReg(srcLoop).
addMBB(loopMBB)
10337 BuildMI(BB, dl, TII->
get(ARM::PHI), destPhi)
10338 .addReg(destLoop).
addMBB(loopMBB)
10345 IsThumb1, IsThumb2);
10347 IsThumb1, IsThumb2);
10351 BuildMI(*BB, BB->
end(), dl, TII->
get(ARM::tSUBi8), varLoop)
10359 TII->
get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
10368 TII->
get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
10377 auto StartOfExit = exitMBB->
begin();
10381 unsigned srcIn = srcLoop;
10382 unsigned destIn = destLoop;
10383 for (
unsigned i = 0; i < BytesLeft; i++) {
10387 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
10388 IsThumb1, IsThumb2);
10389 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
10390 IsThumb1, IsThumb2);
10400 ARMTargetLowering::EmitLowered__chkstk(
MachineInstr &MI,
10407 "__chkstk is only supported on Windows");
10408 assert(Subtarget->
isThumb2() &&
"Windows on ARM requires Thumb-2 mode");
10436 .addExternalSymbol(
"__chkstk")
10449 .addExternalSymbol(
"__chkstk");
10463 BuildMI(*MBB, MI, DL, TII.
get(ARM::t2SUBrr), ARM::SP)
10475 ARMTargetLowering::EmitLowered__dbzchk(
MachineInstr &MI,
10489 BuildMI(TrapBB, DL, TII->
get(ARM::t__brkdiv0));
10493 BuildMI(*MBB, MI, DL, TII->
get(ARM::tCMPi8))
10526 if (miI == BB->
end()) {
10529 sItr != sEnd; ++sItr) {
10538 SelectItr->addRegisterKilled(ARM::CPSR, TRI);
10547 bool isThumb2 = Subtarget->
isThumb2();
10555 case ARM::tLDR_postidx: {
10557 BuildMI(*BB, MI, dl, TII->
get(ARM::tLDMIA_UPD))
10571 case ARM::t2STR_preidx:
10574 case ARM::t2STRB_preidx:
10577 case ARM::t2STRH_preidx:
10581 case ARM::STRi_preidx:
10582 case ARM::STRBi_preidx: {
10583 unsigned NewOpc = MI.
getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
10584 : ARM::STRB_PRE_IMM;
10600 .addMemOperand(MMO);
10604 case ARM::STRr_preidx:
10605 case ARM::STRBr_preidx:
10606 case ARM::STRH_preidx: {
10610 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG;
break;
10611 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG;
break;
10612 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE;
break;
10621 case ARM::tMOVCCr_pseudo: {
10639 F->
insert(It, copy0MBB);
10686 case ARM::BCCZi64: {
10692 bool RHSisZero = MI.
getOpcode() == ARM::BCCZi64;
10697 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
10701 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
10707 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
10711 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
10712 .addReg(LHS2).
addReg(RHS2)
10721 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
10734 case ARM::Int_eh_sjlj_setjmp:
10735 case ARM::Int_eh_sjlj_setjmp_nofp:
10736 case ARM::tInt_eh_sjlj_setjmp:
10737 case ARM::t2Int_eh_sjlj_setjmp:
10738 case ARM::t2Int_eh_sjlj_setjmp_nofp:
10741 case ARM::Int_eh_sjlj_setup_dispatch:
10742 EmitSjLjDispatchBlock(MI, BB);
10765 Fn->
insert(BBI, SinkBB);
10770 bool isThumb2 = Subtarget->
isThumb2();
10775 isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
10789 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
10796 TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
10803 TII->
get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
10812 TII->
get(ARM::PHI), ABSDstReg)
10813 .addReg(NewRsbDstReg).
addMBB(RSBBB)
10822 case ARM::COPY_STRUCT_BYVAL_I32:
10824 return EmitStructByval(MI, BB);
10826 return EmitLowered__chkstk(MI, BB);
10828 return EmitLowered__dbzchk(MI, BB);
10855 : &ARM::GPRRegClass);
10880 MCID = &TII->get(NewOpc);
10884 &&
"converted opcode should be the same except for cc_out" 10885 " (and, on Thumb1, pred)");
10920 assert(!NewOpc &&
"Optional cc_out operand required");
10925 bool definesCPSR =
false;
10926 bool deadCPSR =
false;
10931 definesCPSR =
true;
10938 if (!definesCPSR) {
10939 assert(!NewOpc &&
"Optional cc_out operand required");
10945 "expect uninitialized optional cc_out operand");
10984 default:
return false;
11055 bool AllOnes =
false) {
11060 bool SwapSelectOps;
11062 NonConstantVal, DAG))
11068 OtherOp, NonConstantVal);
11074 CCOp, TrueVal, FalseVal);
11166 Opcode = Intrinsic::arm_neon_vpaddls;
11168 Opcode = Intrinsic::arm_neon_vpaddlu;
11211 unsigned nextIndex = 0;
11234 || C1->getZExtValue() != nextIndex+1)
11279 return DAG.
getNode(ExtOp, dl, VT, tmp);
11316 if (
auto Const = dyn_cast<ConstantSDNode>(SRA.
getOperand(1))) {
11317 if (Const->getZExtValue() != 31)
11326 SDLoc dl(AddcNode);
11327 unsigned Opcode = 0;
11362 SDValue resNode(AddcNode, 0);
11391 "Expect an ADDE or SUBE");
11395 "ADDE node has the wrong inputs");
11412 assert(AddcSubcNode->getNumValues() == 2 &&
11413 AddcSubcNode->getValueType(0) ==
MVT::i32 &&
11414 "Expect ADDC with two result values. First: i32");
11434 bool IsLeftOperandMUL =
false;
11439 IsLeftOperandMUL =
true;
11450 SDValue *LowAddSub =
nullptr;
11453 if ((AddeSubeOp0 != MULOp.
getValue(1)) && (AddeSubeOp1 != MULOp.
getValue(1)))
11456 if (IsLeftOperandMUL)
11457 HiAddSub = &AddeSubeOp1;
11459 HiAddSub = &AddeSubeOp0;
11464 if (AddcSubcOp0 == MULOp.
getValue(0)) {
11465 LoMul = &AddcSubcOp0;
11466 LowAddSub = &AddcSubcOp1;
11468 if (AddcSubcOp1 == MULOp.
getValue(0)) {
11469 LoMul = &AddcSubcOp1;
11470 LowAddSub = &AddcSubcOp0;
11478 if (AddcSubcNode == HiAddSub->getNode() ||
11497 static_cast<ConstantSDNode *
>(LowAddSub->getNode())->getZExtValue() ==
11508 return SDValue(AddeSubeNode, 0);
11529 return SDValue(AddeSubeNode, 0);
11550 SDNode *UmlalNode =
nullptr;
11628 int32_t imm =
C->getSExtValue();
11629 if (imm < 0 && imm > std::numeric_limits<int>::min()) {
11649 int64_t imm =
C->getSExtValue();
11746 if (
auto *Const = dyn_cast<ConstantSDNode>(N1->
getOperand(1))) {
11747 if (Const->getAPIntValue().ult(256))
11750 Const->getAPIntValue().sgt(-256))
11810 for (
auto U : N->
uses()) {
11811 switch(U->getOpcode()) {
11824 if (isa<ConstantSDNode>(U->getOperand(0)) ||
11825 isa<ConstantSDNode>(U->getOperand(1)))
11829 if (U->getOperand(0).getOpcode() ==
ISD::SHL ||
11830 U->getOperand(1).getOpcode() ==
ISD::SHL)
11847 if (!C1ShlC2 || !C2)
11850 APInt C2Int = C2->getAPIntValue();
11851 APInt C1Int = C1ShlC2->getAPIntValue();
11856 if ((C1Int & Mask) != C1Int)
11863 auto LargeImm = [](
const APInt &Imm) {
11864 unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
11865 return Imm.getBitWidth() - Zeros > 8;
11868 if (LargeImm(C1Int) || LargeImm(C2Int))
11984 return DAG.
getNode(Opcode, DL, VT,
12011 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
12013 ShiftAmt = ShiftAmt & (32 - 1);
12018 MulAmt >>= ShiftAmt;
12040 uint64_t MulAmtAbs = -MulAmt;
12088 if (C1 == 255 || C1 == 65535)
12105 if (!C2 || C2 >= 32)
12149 if (Trailing == C2 && C2 + C3 < 32) {
12162 if (Leading == C2 && C2 + C3 < 32) {
12188 APInt SplatBits, SplatUndef;
12189 unsigned SplatBitSize;
12191 if (BVN && Subtarget->
hasNEON() &&
12192 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
12193 if (SplatBitSize <= 64) {
12270 unsigned Opcode = 0;
12271 if (
isS16(OpS16, DAG))
12320 if (Mask == 0xffff)
12327 if ((Val & ~Mask) != Val)
12352 (Mask == ~Mask2)) {
12355 if (Subtarget->
hasDSP() &&
12356 (Mask == 0xffff || Mask == 0xffff0000))
12369 (~Mask == Mask2)) {
12372 if (Subtarget->
hasDSP() &&
12373 (Mask2 == 0xffff || Mask2 == 0xffff0000))
12394 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
12495 APInt SplatBits, SplatUndef;
12496 unsigned SplatBitSize;
12498 if (BVN && Subtarget->
hasNEON() &&
12499 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
12500 if (SplatBitSize <= 64) {
12537 unsigned SplatBitSize;
12540 APInt SplatBits0, SplatBits1;
12544 if (BVN0 && BVN0->
isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
12545 HasAnyUndefs) && !HasAnyUndefs) {
12546 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
12547 HasAnyUndefs) && !HasAnyUndefs) {
12552 SplatBits0 == ~SplatBits1) {
12611 ToMask = ~cast<ConstantSDNode>(N->
getOperand(2))->getAPIntValue();
12616 if (From->getOpcode() ==
ISD::SRL &&
12617 isa<ConstantSDNode>(From->getOperand(1))) {
12618 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
12621 From = From->getOperand(0);
12633 return LastActiveBitInA - 1 == FirstActiveBitInB;
12639 APInt ToMask, FromMask;
12647 APInt CombinedToMask = ToMask;
12649 APInt NewToMask, NewFromMask;
12651 if (NewFrom != From) {
12653 CombinedToMask |= NewToMask;
12659 if ((NewToMask & CombinedToMask).getBoolValue())
12672 CombinedToMask |= NewToMask;
12689 unsigned InvMask = cast<ConstantSDNode>(N->
getOperand(2))->getZExtValue();
12693 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
12694 "undefined behavior");
12697 if ((Mask & (~Mask2)) == 0)
12710 APInt ToMask1, FromMask1;
12713 APInt ToMask2, FromMask2;
12721 APInt NewFromMask = FromMask1 | FromMask2;
12722 APInt NewToMask = ToMask1 | ToMask2;
12727 if (NewFromMask[0] == 0)
12764 DAG.getConstant(4, DL,
MVT::i32));
12805 for (
unsigned i = 0; i < NumElts; ++i) {
12835 for (
unsigned i = 0; i < NumElts; ++i) {
12882 unsigned NumOfBitCastedElts = 0;
12884 unsigned NumOfRelevantElts = NumElts;
12885 for (
unsigned Idx = 0; Idx < NumElts; ++Idx) {
12890 ++NumOfBitCastedElts;
12891 }
else if (Elt.
isUndef() || isa<ConstantSDNode>(Elt))
12894 --NumOfRelevantElts;
12898 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
12906 if (!TLI.isTypeLegal(VecVT))
12916 for (
unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
13052 unsigned HalfElts = NumElts/2;
13054 for (
unsigned n = 0; n < NumElts; ++n) {
13057 if (MaskElt < (
int)HalfElts)
13059 else if (MaskElt >= (
int)NumElts && MaskElt < (
int)(NumElts + HalfElts))
13060 NewElt = HalfElts + MaskElt - NumElts;
13078 const unsigned AddrOpIdx = ((isIntrinsic ||
isStore) ? 2 : 1);
13088 UI.getUse().getResNo() != Addr.
getResNo())
13104 bool isLoadOp =
true;
13105 bool isLaneOp =
false;
13106 unsigned NewOpc = 0;
13107 unsigned NumVecs = 0;
13109 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
13113 NumVecs = 1;
break;
13115 NumVecs = 2;
break;
13117 NumVecs = 3;
break;
13119 NumVecs = 4;
break;
13120 case Intrinsic::arm_neon_vld2dup:
13121 case Intrinsic::arm_neon_vld3dup:
13122 case Intrinsic::arm_neon_vld4dup:
13127 NumVecs = 2; isLaneOp =
true;
break;
13129 NumVecs = 3; isLaneOp =
true;
break;
13131 NumVecs = 4; isLaneOp =
true;
break;
13133 NumVecs = 1; isLoadOp =
false;
break;
13135 NumVecs = 2; isLoadOp =
false;
break;
13137 NumVecs = 3; isLoadOp =
false;
break;
13139 NumVecs = 4; isLoadOp =
false;
break;
13141 NumVecs = 2; isLoadOp =
false; isLaneOp =
true;
break;
13143 NumVecs = 3; isLoadOp =
false; isLaneOp =
true;
break;
13145 NumVecs = 4; isLoadOp =
false; isLaneOp =
true;
break;
13156 NumVecs = 1; isLaneOp =
false;
break;
13158 NumVecs = 1; isLaneOp =
false; isLoadOp =
false;
break;
13166 }
else if (isIntrinsic) {
13169 assert(isStore &&
"Node has to be a load, a store, or an intrinsic!");
13180 if (NumBytes >= 3 * 16 && (!CInc || CInc->
getZExtValue() != NumBytes)) {
13189 EVT AlignedVecTy = VecTy;
13206 if (isa<LSBaseSDNode>(N)) {
13207 if (Alignment == 0)
13211 assert(NumVecs == 1 &&
"Unexpected multi-element generic load/store.");
13212 assert(!isLaneOp &&
"Unexpected generic load/store lane.");
13229 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
13231 for (n = 0; n < NumResultVecs; ++n)
13232 Tys[n] = AlignedVecTy;
13243 if (
StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
13249 for (
unsigned i = AddrOpIdx + 1; i < N->
getNumOperands() - 1; ++i)
13269 for (
unsigned i = 0; i < NumResultVecs; ++i)
13275 SDValue &LdVal = NewResults[0];
13311 unsigned NumVecs = 0;
13312 unsigned NewOpc = 0;
13313 unsigned IntNo = cast<ConstantSDNode>(VLD->
getOperand(1))->getZExtValue();
13314 if (IntNo == Intrinsic::arm_neon_vld2lane) {
13317 }
else if (IntNo == Intrinsic::arm_neon_vld3lane) {
13320 }
else if (IntNo == Intrinsic::arm_neon_vld4lane) {
13329 unsigned VLDLaneNo =
13330 cast<ConstantSDNode>(VLD->
getOperand(NumVecs+3))->getZExtValue();
13334 if (UI.getUse().getResNo() == NumVecs)
13338 VLDLaneNo != cast<ConstantSDNode>(User->
getOperand(1))->getZExtValue())
13345 for (n = 0; n < NumVecs; ++n)
13358 unsigned ResNo = UI.getUse().
getResNo();
13360 if (ResNo == NumVecs)
13368 std::vector<SDValue> VLDDupResults;
13369 for (
unsigned n = 0; n < NumVecs; ++n)
13370 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), n));
13371 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), NumVecs));
13398 unsigned Imm = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
13462 assert(StVT != VT &&
"Cannot truncate to the same type");
13472 if (0 != (NumElems * FromEltSz) % ToEltSz)
13475 unsigned SizeRatio = FromEltSz / ToEltSz;
13480 NumElems * SizeRatio);
13486 for (
unsigned i = 0; i < NumElems; ++i)
13502 if (TLI.
isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
13522 for (
unsigned I = 0;
I <
E;
I++) {
13529 DAG.
getNode(
ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
13553 unsigned NumElements = 0;
13575 unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
13583 NewToVT, Alignment, MMOFlags, AAInfo);
13584 Stores.push_back(Store);
13685 if (!isa<BuildVectorSDNode>(ConstVec))
13693 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
13704 if (C == -1 || C == 0 || C > 32)
13709 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
13710 Intrinsic::arm_neon_vcvtfp2fxu;
13716 if (IntBits < FloatBits)
13743 if (!isa<BuildVectorSDNode>(ConstVec))
13751 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
13762 if (C == -1 || C == 0 || C > 32)
13768 if (IntBits < FloatBits)
13773 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
13774 Intrinsic::arm_neon_vcvtfxu2fp;
13783 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
13794 case Intrinsic::arm_neon_vshifts:
13795 case Intrinsic::arm_neon_vshiftu:
13796 case Intrinsic::arm_neon_vrshifts:
13797 case Intrinsic::arm_neon_vrshiftu:
13798 case Intrinsic::arm_neon_vrshiftn:
13799 case Intrinsic::arm_neon_vqshifts:
13800 case Intrinsic::arm_neon_vqshiftu:
13801 case Intrinsic::arm_neon_vqshiftsu:
13802 case Intrinsic::arm_neon_vqshiftns:
13803 case Intrinsic::arm_neon_vqshiftnu:
13804 case Intrinsic::arm_neon_vqshiftnsu:
13805 case Intrinsic::arm_neon_vqrshiftns:
13806 case Intrinsic::arm_neon_vqrshiftnu:
13807 case Intrinsic::arm_neon_vqrshiftnsu: {
13810 unsigned VShiftOpc = 0;
13813 case Intrinsic::arm_neon_vshifts:
13814 case Intrinsic::arm_neon_vshiftu:
13826 case Intrinsic::arm_neon_vrshifts:
13827 case Intrinsic::arm_neon_vrshiftu:
13832 case Intrinsic::arm_neon_vqshifts:
13833 case Intrinsic::arm_neon_vqshiftu:
13838 case Intrinsic::arm_neon_vqshiftsu:
13843 case Intrinsic::arm_neon_vrshiftn:
13844 case Intrinsic::arm_neon_vqshiftns:
13845 case Intrinsic::arm_neon_vqshiftnu:
13846 case Intrinsic::arm_neon_vqshiftnsu:
13847 case Intrinsic::arm_neon_vqrshiftns:
13848 case Intrinsic::arm_neon_vqrshiftnu:
13849 case Intrinsic::arm_neon_vqrshiftnsu:
13861 case Intrinsic::arm_neon_vshifts:
13862 case Intrinsic::arm_neon_vshiftu:
13865 case Intrinsic::arm_neon_vrshifts:
13868 case Intrinsic::arm_neon_vrshiftu:
13871 case Intrinsic::arm_neon_vrshiftn:
13874 case Intrinsic::arm_neon_vqshifts:
13877 case Intrinsic::arm_neon_vqshiftu:
13880 case Intrinsic::arm_neon_vqshiftsu:
13883 case Intrinsic::arm_neon_vqshiftns:
13886 case Intrinsic::arm_neon_vqshiftnu:
13889 case Intrinsic::arm_neon_vqshiftnsu:
13892 case Intrinsic::arm_neon_vqrshiftns:
13895 case Intrinsic::arm_neon_vqrshiftnu:
13898 case Intrinsic::arm_neon_vqrshiftnsu:
13908 case Intrinsic::arm_neon_vshiftins: {
13911 unsigned VShiftOpc = 0;
13927 case Intrinsic::arm_neon_vqrshifts:
13928 case Intrinsic::arm_neon_vqrshiftu:
13978 if (AndMask == 255 || AndMask == 65535)
13982 if (MaskedBits > ShiftAmt) {
14016 unsigned VShiftOpc =
14045 unsigned NumElements = 0;
14050 if (NumElements == 0 ||
14082 Alignment, MMOFlags, AAInfo);
14112 isa<ConstantSDNode>(Lane)) {
14160 auto CCNode = cast<ConstantSDNode>(CMOV->
getOperand(2));
14161 auto CC = CCNode->getAPIntValue().getLimitedValue();
14198 unsigned Heuristic = Subtarget->
isThumb() ? 3 : 2;
14205 if ((OrCI & Known.
Zero) != OrCI)
14212 unsigned BitInX = AndC->
logBase2();
14220 for (
unsigned BitInY = 0, NumActiveBits = OrCI.
getActiveBits();
14221 BitInY < NumActiveBits; ++BitInY) {
14222 if (OrCI[BitInY] == 0)
14225 Mask.setBit(BitInY);
14248 if (!cast<ConstantSDNode>(N.
getOperand(1))->isOne())
14257 if (Const->isNullValue())
14259 else if (Const->isOne())
14263 CC = cast<CondCodeSDNode>(N.
getOperand(2))->
get();
14267 unsigned IntOp = cast<ConstantSDNode>(N.
getOperand(1))->getZExtValue();
14268 if (IntOp != Intrinsic::test_set_loop_iterations &&
14269 IntOp != Intrinsic::loop_decrement_reg)
14295 bool Negate =
false;
14305 CC = cast<CondCodeSDNode>(N->
getOperand(1))->
get();
14308 if (
auto *Const = dyn_cast<ConstantSDNode>(N->
getOperand(3))) {
14309 if (!Const->isOne() && !Const->isNullValue())
14311 Imm = Const->getZExtValue();
14339 assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
14340 "unsupported condition");
14345 unsigned IntOp = cast<ConstantSDNode>(Int->
getOperand(1))->getZExtValue();
14347 &&
"expected single br user");
14358 if (IntOp == Intrinsic::test_set_loop_iterations) {
14361 if (IsTrueIfZero(CC, Imm)) {
14362 SDValue Ops[] = { Chain, Elements, Dest };
14367 UpdateUncondBr(Br, Dest, DAG);
14369 SDValue Ops[] = { Chain, Elements, OtherTarget };
14383 SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;
14387 if (Target == OtherTarget)
14388 UpdateUncondBr(Br, Dest, DAG);
14426 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
14427 (LHS01C && LHS01C->getZExtValue() == 1) &&
14428 (LHS1C && LHS1C->getZExtValue() == 1) &&
14429 (RHSC && RHSC->getZExtValue() == 0)) {
14482 if (CC ==
ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
14485 }
else if (CC ==
ARMCC::EQ && TrueVal == RHS) {
14498 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
14499 (LHS1C && LHS1C->getZExtValue() == 1) &&
14500 (RHSC && RHSC->getZExtValue() == 0)) {
14582 const APInt *TrueConst;
14589 unsigned ShiftAmount = TrueConst->
logBase2();
14603 if (Known.
Zero == 0xfffffffe)
14606 else if (Known.
Zero == 0xffffff00)
14609 else if (Known.
Zero == 0xffff0000)
14734 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
14735 case Intrinsic::arm_neon_vld1:
14736 case Intrinsic::arm_neon_vld1x2:
14737 case Intrinsic::arm_neon_vld1x3:
14738 case Intrinsic::arm_neon_vld1x4:
14739 case Intrinsic::arm_neon_vld2:
14740 case Intrinsic::arm_neon_vld3:
14741 case Intrinsic::arm_neon_vld4:
14742 case Intrinsic::arm_neon_vld2lane:
14743 case Intrinsic::arm_neon_vld3lane:
14744 case Intrinsic::arm_neon_vld4lane:
14745 case Intrinsic::arm_neon_vld2dup:
14746 case Intrinsic::arm_neon_vld3dup:
14747 case Intrinsic::arm_neon_vld4dup:
14748 case Intrinsic::arm_neon_vst1:
14749 case Intrinsic::arm_neon_vst1x2:
14750 case Intrinsic::arm_neon_vst1x3:
14751 case Intrinsic::arm_neon_vst1x4:
14752 case Intrinsic::arm_neon_vst2:
14753 case Intrinsic::arm_neon_vst3:
14754 case Intrinsic::arm_neon_vst4:
14755 case Intrinsic::arm_neon_vst2lane:
14756 case Intrinsic::arm_neon_vst3lane:
14757 case Intrinsic::arm_neon_vst4lane:
14772 unsigned Alignment,
14774 bool *
Fast)
const {
14785 if (AllowsUnaligned) {
14796 if (Subtarget->
hasNEON() && (AllowsUnaligned || Subtarget->
isLittle())) {
14843 unsigned AlignCheck) {
14844 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
14845 (DstAlign == 0 || DstAlign % AlignCheck == 0));
14849 uint64_t Size,
unsigned DstAlign,
unsigned SrcAlign,
bool IsMemset,
14850 bool ZeroMemset,
bool MemcpyStrSrc,
14853 if ((!IsMemset || ZeroMemset) && Subtarget->
hasNEON() &&
14862 }
else if (Size >= 8 &&
14883 return (SrcBits == 64 && DestBits == 32);
14892 return (SrcBits == 64 && DestBits == 32);
14938 return Ext->getType()->getScalarSizeInBits() ==
14939 2 *
Ext->getOperand(0)->getType()->getScalarSizeInBits();
14944 !areExtDoubled(cast<Instruction>(Ext1)) ||
14945 !areExtDoubled(cast<Instruction>(Ext2)))
14961 case Instruction::Sub:
14980 case Instruction::Mul:
14981 case Instruction::ICmp:
14983 case Instruction::Sub:
14984 case Instruction::Shl:
14985 case Instruction::LShr:
14986 case Instruction::AShr:
14987 return Operand == 1;
14994 if (!isa<ShuffleVectorInst>(I->
getOperand(Op)))
14996 if (!IsSinker(I, Op))
15006 for (
Use &U : Shuffle->
uses()) {
15007 Instruction *Insn = cast<Instruction>(U.getUser());
15008 if (!IsSinker(Insn, U.getOperandNo()))
15022 if (
auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.
getOperand(0))) {
15023 if (Ld->isExpandingLoad())
15059 unsigned AS)
const {
15062 return AM.
Scale < 0 ? 1 : 0;
15080 bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(
const MachineFunction &MF,
15106 unsigned Scale = 1;
15123 if ((V & (Scale - 1)) != 0)
15125 return isUInt<5>(V / Scale);
15138 bool IsNeg =
false;
15151 return isShiftedUInt<7,2>(V);
15154 return isShiftedUInt<7,1>(V);
15156 return isUInt<7>(V);
15164 return isShiftedUInt<8, 1>(V);
15167 return isShiftedUInt<8, 2>(V);
15169 if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
15173 return isUInt<12>(V);
15199 default:
return false;
15204 return isUInt<12>(V);
15212 return isShiftedUInt<8, 2>(V);
15218 int Scale = AM.
Scale;
15223 default:
return false;
15231 Scale = Scale & ~1;
15232 return Scale == 2 || Scale == 4 || Scale == 8;
15249 if (Scale & 1)
return false;
15256 const int Scale = AM.
Scale;
15266 return (Scale == 1) || (!AM.
HasBaseReg && Scale == 2);
15282 switch (AM.
Scale) {
15299 int Scale = AM.
Scale;
15301 default:
return false;
15305 if (Scale < 0) Scale = -Scale;
15313 if (Scale == 1 || (AM.
HasBaseReg && Scale == -1))
15326 if (Scale & 1)
return false;
15346 return Imm >= 0 && Imm <= 255;
15361 return AbsImm >= 0 && AbsImm <= 255;
15375 int RHSC = (int)RHS->getZExtValue();
15376 if (RHSC < 0 && RHSC > -256) {
15389 int RHSC = (int)RHS->getZExtValue();
15390 if (RHSC < 0 && RHSC > -0x1000) {
15432 int RHSC = (int)RHS->getZExtValue();
15433 if (RHSC < 0 && RHSC > -0x100) {
15438 }
else if (RHSC > 0 && RHSC < 0x100) {
15454 if (!isa<ConstantSDNode>(Ptr->
getOperand(1)))
15460 bool CanChangeType = isLE && !
IsMasked;
15465 auto IsInRange = [&](
int RHSC,
int Limit,
int Scale) {
15466 if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
15471 }
else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
15483 if (Align >= 2 && IsInRange(RHSC, 0x80, 2))
15486 if (IsInRange(RHSC, 0x80, 1))
15488 }
else if (Align >= 4 &&
15490 IsInRange(RHSC, 0x80, 4))
15492 else if (Align >= 2 &&
15494 IsInRange(RHSC, 0x80, 2))
15496 else if ((CanChangeType || VT ==
MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
15518 Ptr =
LD->getBasePtr();
15519 VT =
LD->getMemoryVT();
15520 Align =
LD->getAlignment();
15522 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
15523 Ptr = ST->getBasePtr();
15524 VT = ST->getMemoryVT();
15525 Align = ST->getAlignment();
15527 Ptr =
LD->getBasePtr();
15528 VT =
LD->getMemoryVT();
15529 Align =
LD->getAlignment();
15533 Ptr = ST->getBasePtr();
15534 VT = ST->getMemoryVT();
15535 Align = ST->getAlignment();
15541 bool isLegal =
false;
15576 VT =
LD->getMemoryVT();
15577 Ptr =
LD->getBasePtr();
15578 Align =
LD->getAlignment();
15581 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
15582 VT = ST->getMemoryVT();
15583 Ptr = ST->getBasePtr();
15584 Align = ST->getAlignment();
15585 isNonExt = !ST->isTruncatingStore();
15587 VT =
LD->getMemoryVT();
15588 Ptr =
LD->getBasePtr();
15589 Align =
LD->getAlignment();
15594 VT = ST->getMemoryVT();
15595 Ptr = ST->getBasePtr();
15596 Align = ST->getAlignment();
15597 isNonExt = !ST->isTruncatingStore();
15609 if (!RHS || RHS->getZExtValue() != 4)
15619 bool isLegal =
false;
15654 const APInt &DemandedElts,
15656 unsigned Depth)
const {
15685 Known.
One &= KnownRHS.
One;
15693 case Intrinsic::arm_ldaex:
15694 case Intrinsic::arm_ldrex: {
15695 EVT VT = cast<MemIntrinsicSDNode>(
Op)->getMemoryVT();
15723 "VGETLANE index out of bounds");
15735 Known = Known.
sext(DstSz);
15737 Known = Known.
zext(DstSz,
true );
15747 const APInt &DemandedAPInt,
15774 unsigned ShrunkMask = Mask & Demanded;
15775 unsigned ExpandedMask = Mask | ~Demanded;
15779 if (ShrunkMask == 0)
15785 if (ExpandedMask == ~0U)
15788 auto IsLegalMask = [ShrunkMask, ExpandedMask](
unsigned Mask) ->
bool {
15789 return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask &
Mask) == 0;
15791 auto UseMask = [
Mask,
Op, VT, &TLO](
unsigned NewMask) ->
bool {
15792 if (NewMask == Mask)
15801 if (IsLegalMask(0xFF))
15802 return UseMask(0xFF);
15805 if (IsLegalMask(0xFFFF))
15806 return UseMask(0xFFFF);
15810 if (ShrunkMask < 256)
15811 return UseMask(ShrunkMask);
15815 if ((
int)ExpandedMask <= -2 && (
int)ExpandedMask >= -256)
15816 return UseMask(ExpandedMask);
15843 switch (AsmPieces.
size()) {
15844 default:
return false;
15846 AsmStr = AsmPieces[0];
15851 if (AsmPieces.
size() == 3 &&
15852 AsmPieces[0] ==
"rev" && AsmPieces[1] ==
"$0" && AsmPieces[2] ==
"$1" &&
15888 unsigned S = Constraint.
size();
15890 switch (Constraint[0]) {
15902 }
else if (S == 2) {
15903 switch (Constraint[0]) {
15923 if (!CallOperandVal)
15927 switch (*constraint) {
15947 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
15951 switch (Constraint.
size()) {
15954 switch (Constraint[0]) {
15957 return RCPair(0U, &ARM::tGPRRegClass);
15958 return RCPair(0U, &ARM::GPRRegClass);
15961 return RCPair(0U, &ARM::hGPRRegClass);
15965 return RCPair(0U, &ARM::tGPRRegClass);
15966 return RCPair(0U, &ARM::GPRRegClass);
15971 return RCPair(0U, &ARM::SPRRegClass);
15973 return RCPair(0U, &ARM::DPRRegClass);
15975 return RCPair(0U, &ARM::QPRRegClass);
15981 return RCPair(0U, &ARM::SPR_8RegClass);
15983 return RCPair(0U, &ARM::DPR_8RegClass);
15985 return RCPair(0U, &ARM::QPR_8RegClass);
15991 return RCPair(0U, &ARM::SPRRegClass);
15993 return RCPair(0U, &ARM::DPR_VFP2RegClass);
15995 return RCPair(0U, &ARM::QPR_VFP2RegClass);
16001 if (Constraint[0] ==
'T') {
16002 switch (Constraint[1]) {
16006 return RCPair(0U, &ARM::tGPREvenRegClass);
16008 return RCPair(0U, &ARM::tGPROddRegClass);
16017 if (
StringRef(
"{cc}").equals_lower(Constraint))
16018 return std::make_pair(
unsigned(ARM::CPSR), &ARM::CCRRegClass);
16026 std::string &Constraint,
16027 std::vector<SDValue>&Ops,
16032 if (Constraint.length() != 1)
return;
16034 char ConstraintLetter = Constraint[0];
16035 switch (ConstraintLetter) {
16038 case 'I':
case 'J':
case 'K':
case 'L':
16039 case 'M':
case 'N':
case 'O':
16045 int CVal = (int) CVal64;
16048 if (CVal != CVal64)
16051 switch (ConstraintLetter) {
16056 if (CVal >= 0 && CVal <= 65535)
16063 if (CVal >= 0 && CVal <= 255)
16065 }
else if (Subtarget->
isThumb2()) {
16084 if (CVal >= -255 && CVal <= -1)
16090 if (CVal >= -4095 && CVal <= 4095)
16103 }
else if (Subtarget->
isThumb2()) {
16126 if (CVal >= -7 && CVal < 7)
16128 }
else if (Subtarget->
isThumb2()) {
16151 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
16157 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
16165 if (CVal >= 0 && CVal <= 31)
16174 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
16184 Ops.push_back(Result);
16194 "Unhandled Opcode in getDivRemLibcall");
16200 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
break;
16201 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
break;
16202 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
break;
16203 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
break;
16212 "Unhandled Opcode in getDivRemArgList");
16216 TargetLowering::ArgListEntry
Entry;
16222 Entry.IsSExt = isSigned;
16223 Entry.IsZExt = !isSigned;
16224 Args.push_back(Entry);
16235 "Register-based DivRem lowering only");
16238 "Invalid opcode for Div/Rem lowering");
16256 SDValue Div = DAG.
getNode(DivOpcode, dl, VT, Dividend, Divisor);
16260 SDValue Values[2] = {Div, Rem};
16286 return CallInfo.first;
16293 std::vector<Type*> RetTyParams;
16294 Type *RetTyElement;
16304 RetTyParams.push_back(RetTyElement);
16305 RetTyParams.push_back(RetTyElement);
16322 CallLoweringInfo CLI(DAG);
16323 CLI.setChain(InChain)
16325 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(
SDLoc(N));
16326 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
16329 SDNode *ResNode = CallResult.first.getNode();
16344 "no-stack-arg-probe")) {
16345 unsigned Align = cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue();
16353 SDValue Ops[2] = { SP, Chain };
16370 SDValue Ops[2] = { NewSP, Chain };
16379 assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
16380 "Unexpected type for custom-lowering FP_EXTEND");
16383 "With both FP DP and 16, any FP conversion is legal!");
16386 "With FP16, 16 to 32 conversion is legal!");
16389 if (SrcSz == 32 && DstSz == 64 && Subtarget->
hasFP64()) {
16407 MakeLibCallOptions CallOptions;
16409 for (
unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
16410 bool Supported = (Sz == 16 ? Subtarget->
hasFP16() : Subtarget->
hasFP64());
16423 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
16424 "Unexpected type for custom-lowering FP_EXTEND");
16425 std::tie(SrcVal, Chain) =
makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
16430 return IsStrict ? DAG.
getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
16442 assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
16443 "Unexpected type for custom-lowering FP_ROUND");
16446 "With both FP DP and 16, any FP conversion is legal!");
16451 if (SrcSz == 32 && Subtarget->
hasFP16())
16456 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
16457 "Unexpected type for custom-lowering FP_ROUND");
16458 MakeLibCallOptions CallOptions;
16461 std::tie(Result, Chain) =
makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
16463 return IsStrict ? DAG.
getMergeValues({Result, Chain}, Loc) : Result;
16477 unsigned OpTypeBits = HalfT.getScalarSizeInBits();
16505 if (v == 0xffffffff)
16517 bool ForCodeSize)
const {
16535 unsigned Intrinsic)
const {
16536 switch (Intrinsic) {
16537 case Intrinsic::arm_neon_vld1:
16538 case Intrinsic::arm_neon_vld2:
16539 case Intrinsic::arm_neon_vld3:
16540 case Intrinsic::arm_neon_vld4:
16541 case Intrinsic::arm_neon_vld2lane:
16542 case Intrinsic::arm_neon_vld3lane:
16543 case Intrinsic::arm_neon_vld4lane:
16544 case Intrinsic::arm_neon_vld2dup:
16545 case Intrinsic::arm_neon_vld3dup:
16546 case Intrinsic::arm_neon_vld4dup: {
16555 Info.
align =
MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
16560 case Intrinsic::arm_neon_vld1x2:
16561 case Intrinsic::arm_neon_vld1x3:
16562 case Intrinsic::arm_neon_vld1x4: {
16575 case Intrinsic::arm_neon_vst1:
16576 case Intrinsic::arm_neon_vst2:
16577 case Intrinsic::arm_neon_vst3:
16578 case Intrinsic::arm_neon_vst4:
16579 case Intrinsic::arm_neon_vst2lane:
16580 case Intrinsic::arm_neon_vst3lane:
16581 case Intrinsic::arm_neon_vst4lane: {
16585 unsigned NumElts = 0;
16590 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
16596 Info.
align =
MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
16601 case Intrinsic::arm_neon_vst1x2:
16602 case Intrinsic::arm_neon_vst1x3:
16603 case Intrinsic::arm_neon_vst1x4: {
16607 unsigned NumElts = 0;
16612 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
16622 case Intrinsic::arm_ldaex:
16623 case Intrinsic::arm_ldrex: {
16634 case Intrinsic::arm_stlex:
16635 case Intrinsic::arm_strex: {
16646 case Intrinsic::arm_stlexd:
16647 case Intrinsic::arm_strexd:
16656 case Intrinsic::arm_ldaexd:
16657 case Intrinsic::arm_ldrexd:
16680 if (Bits == 0 || Bits > 32)
16686 unsigned Index)
const {
16772 return (Size == 64) && !Subtarget->
isMClass();
16798 return (Size <= (Subtarget->
isMClass() ? 32U : 64U) && hasAtomicRMW)
16810 bool HasAtomicCmpXchg =
16819 return InsertFencesForAtomic;
16840 F->addAttribute(1, Attribute::AttrKind::InReg);
16858 unsigned &Cost)
const {
16872 if (!isa<ConstantInt>(Idx))
16876 unsigned BitWidth = cast<VectorType>(VectorTy)->
getBitWidth();
16879 if (BitWidth == 64 || BitWidth == 128) {
16901 Type *ValTy = cast<PointerType>(Addr->
getType())->getElementType();
16909 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
16926 Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
16931 cast<PointerType>(Addr->
getType())->getElementType());
16953 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
16965 Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
17010 if (ElSize != 8 && ElSize != 16 && ElSize != 32)
17015 if (Subtarget->
hasNEON() && VecSize == 64)
17017 return VecSize % 128 == 0;
17043 "Invalid interleave factor");
17044 assert(!Shuffles.
empty() &&
"Empty shufflevector input");
17046 "Unmatched number of shufflevectors and indices");
17072 if (NumLoads > 1) {
17076 VecTy->getVectorNumElements() / NumLoads);
17082 BaseAddr, VecTy->getVectorElementType()->getPointerTo(
17088 auto createLoadIntrinsic = [&](
Value *BaseAddr) {
17091 Type *Tys[] = {VecTy, Int8Ptr};
17092 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
17093 Intrinsic::arm_neon_vld3,
17094 Intrinsic::arm_neon_vld4};
17102 return Builder.
CreateCall(VldnFunc, Ops,
"vldN");
17104 assert((Factor == 2 || Factor == 4) &&
17105 "expected interleave factor of 2 or 4 for MVE");
17107 Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
17110 Type *Tys[] = {VecTy, VecEltTy};
17116 return Builder.
CreateCall(VldnFunc, Ops,
"vldN");
17125 for (
unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
17131 VecTy->getVectorNumElements() * Factor);
17133 CallInst *VldN = createLoadIntrinsic(BaseAddr);
17137 for (
unsigned i = 0; i < Shuffles.
size(); i++) {
17139 unsigned Index = Indices[i];
17149 SubVecs[SV].push_back(SubVec);
17158 auto &SubVec = SubVecs[SVI];
17161 SVI->replaceAllUsesWith(WideVec);
17195 unsigned Factor)
const {
17197 "Invalid interleave factor");
17201 "Invalid interleaved store");
17238 if (NumStores > 1) {
17241 LaneLen /= NumStores;
17256 auto createStoreIntrinsic = [&](
Value *BaseAddr,
17259 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
17260 Intrinsic::arm_neon_vst3,
17261 Intrinsic::arm_neon_vst4};
17263 Type *Tys[] = {Int8Ptr, SubVecTy};
17266 SI->
getModule(), StoreInts[Factor - 2], Tys);
17270 for (
auto S : Shuffles)
17275 assert((Factor == 2 || Factor == 4) &&
17276 "expected interleave factor of 2 or 4 for MVE");
17278 Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
17281 Type *Tys[] = {EltPtrTy, SubVecTy};
17287 for (
auto S : Shuffles)
17289 for (
unsigned F = 0;
F < Factor;
F++) {
17297 for (
unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
17300 if (StoreCount > 0)
17302 BaseAddr, LaneLen * Factor);
17307 for (
unsigned i = 0; i < Factor; i++) {
17308 unsigned IdxI = StoreCount * LaneLen * Factor + i;
17309 if (
Mask[IdxI] >= 0) {
17313 unsigned StartMask = 0;
17314 for (
unsigned j = 1; j < LaneLen; j++) {
17315 unsigned IdxJ = StoreCount * LaneLen * Factor + j;
17316 if (
Mask[IdxJ * Factor + IdxI] >= 0) {
17317 StartMask =
Mask[IdxJ * Factor + IdxI] - IdxJ;
17332 createStoreIntrinsic(BaseAddr, Shuffles);
17346 uint64_t &Members) {
17347 if (
auto *ST = dyn_cast<StructType>(Ty)) {
17348 for (
unsigned i = 0; i < ST->getNumElements(); ++i) {
17349 uint64_t SubMembers = 0;
17352 Members += SubMembers;
17354 }
else if (
auto *AT = dyn_cast<ArrayType>(Ty)) {
17355 uint64_t SubMembers = 0;
17358 Members += SubMembers * AT->getNumElements();
17369 }
else if (
auto *VT = dyn_cast<VectorType>(Ty)) {
17376 return VT->getBitWidth() == 64;
17378 return VT->getBitWidth() == 128;
17380 switch (VT->getBitWidth()) {
17393 return (Members > 0 && Members <= 4);
17401 return ABITypeAlign;
17413 if (getEffectiveCallingConv(CallConv, isVarArg) !=
17418 uint64_t Members = 0;
17423 return IsHA || IsIntArray;
17427 const Constant *PersonalityFn)
const {
17430 return Subtarget->
useSjLjEH() ? ARM::NoRegister : ARM::R0;
17434 const Constant *PersonalityFn)
const {
17437 return Subtarget->
useSjLjEH() ? ARM::NoRegister : ARM::R1;
17446 void ARMTargetLowering::insertCopiesSplitCSR(
17460 RC = &ARM::GPRRegClass;
17461 else if (ARM::DPRRegClass.
contains(*
I))
17462 RC = &ARM::DPRRegClass;
17473 Attribute::NoUnwind) &&
17474 "Function should be nounwind in insertCopiesSplitCSR!");
17480 for (
auto *Exit : Exits)
17482 TII->
get(TargetOpcode::COPY), *
I)
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG)
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG)
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs...
static SDValue PerformVDUPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool isValidMVECond(unsigned CC, bool IsFloat)
bool isMachineConstantPoolEntry() const
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
Type * getVectorElementType() const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
void setFrameAddressIsTaken(bool T)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Value * getValueOperand()
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, SelectionDAG &DAG)
BC is a bitcast that is about to be turned into a VMOVDRR.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
const MachineInstrBuilder & add(const MachineOperand &MO) const
static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, SelectionDAG &DAG)
BUILTIN_OP_END - This must be the last enum value in this list.
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
A parsed version of the target data layout string in and methods for querying it. ...
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool isVZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
reference emplace_back(ArgTypes &&... Args)
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, SelectionDAG &DAG)
EVT getValueType() const
Return the ValueType of the referenced return value.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isTargetGNUAEABI() const
class_match< UndefValue > m_Undef()
Match an arbitrary undef constant.
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type...
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
void markGlobalAsPromotedToConstantPool(const GlobalVariable *GV)
Indicate to the backend that GV has had its storage changed to inside a constant pool.
void finalizeLowering(MachineFunction &MF) const override
Execute target specific actions to finalize target lowering.
iterator_range< use_iterator > uses()
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
static bool isConstant(const MachineInstr &MI)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
static MachinePointerInfo getJumpTable(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a jump table entry.
static const APInt * isPowerOf2Constant(SDValue V)
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
uint64_t getZExtValue() const
Get zero extended value.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
MachineBasicBlock * getMBB() const
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain)
Atomic ordering constants.
int getFunctionContextIndex() const
Return the index for the function context object.
static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, SelectionDAG &DAG)
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
static bool areExtractExts(Value *Ext1, Value *Ext2)
Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements...
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
This class represents lattice values for constants.
static SDValue PerformVMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMULCombine Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the special multi...
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static bool IsMasked(Instruction *I)
bool hasDivideInThumbMode() const
Register getLocReg() const
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG)
Align getStackAlignment() const
std::pair< unsigned, const TargetRegisterClass * > RCPair
Type * getParamType(unsigned i) const
Parameter type accessors.
const unsigned char * bytes_end() const
int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static bool IsVUZPShuffleNode(SDNode *N)
TOF
Target Operand Flag enum.
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
StringRef getPrivateGlobalPrefix() const
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
unsigned EnableDebugEntryValues
Emit debug info about parameter's entry values.
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
A Module instance is used to store all the information related to an LLVM module. ...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
static SDValue PerformADDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDECombine - Target-specific dag combine transform from ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
amdgpu Simplify well known AMD library false FunctionCallee Value const Twine & Name
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
int getSplatIndex() const
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
const SDValue & getBasePtr() const
static SDValue PerformInsertEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformInsertEltCombine - Target-specific dag combine xforms for ISD::INSERT_VECTOR_ELT.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
EABI EABIVersion
EABIVersion - This flag specifies the EABI version.
static bool isVTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG)
SkipLoadExtensionForVMULL - return a load of the original vector size that does not do any sign/zero ...
void push_back(const T &Elt)
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
ARMConstantPoolValue - ARM specific constantpool value.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each element has been zero/sign-...
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
static cl::opt< unsigned > MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden, cl::desc("Maximum interleave factor for MVE VLDn to generate."), cl::init(2))
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
void setIsDef(bool Val=true)
Change a def to a use, or a use to a def.
Describe properties that are true of each instruction in the target description file.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, uint64_t &Members)
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
MachineMemOperand::Flags flags
Y = RRC X, rotate right via carry.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
static bool hasNormalLoadOperand(SDNode *N)
hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node are normal, non-volatile loads.
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Returns true if the addressing mode representing by AM is legal for the Thumb1 target, for a load/store of the specified type.
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
SDVTList getVTList() const
This class represents a function call, abstracting a target machine's calling convention.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
EK_Inline - Jump table entries are emitted inline at their point of use.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
Register getFrameRegister(const MachineFunction &MF) const override
Global Offset Table, Thread Pointer Offset.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
static unsigned SelectPairHalf(unsigned Elements, ArrayRef< int > Mask, unsigned Index)
static MVT getFloatingPointVT(unsigned BitWidth)
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
const std::string & getAsmString() const
const SDValue & getChain() const
static SDValue PerformSUBCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
static bool isLegalT1AddressImmediate(int64_t V, EVT VT)
static cl::opt< bool > ARMInterworking("arm-interworking", cl::Hidden, cl::desc("Enable / disable ARM interworking (for debugging only)"), cl::init(true))
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
bool isTargetCOFF() const
unsigned getValNo() const
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
unsigned getAlignment() const
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG)
SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending load, or BUILD_VECTOR with extended elements, return the unextended value.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, const ARMSubtarget *ST, const SDLoc &dl)
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
static bool isVMOVNMask(ArrayRef< int > M, EVT VT, bool Top)
auto count_if(R &&Range, UnaryPredicate P) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
bool sgt(const APInt &RHS) const
Signed greater than comparison.
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
const Use & getOperandUse(unsigned i) const
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
bool hasDLLImportStorageClass() const
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
C - The default llvm calling convention, compatible with C.
GlobalVariable * getGlobalVariable(StringRef Name) const
Look up the specified global variable in the module symbol table.
APInt trunc(unsigned width) const
Truncate to new width.
static bool isLegalMVEShuffleOp(unsigned PFEntry)
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
static SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, TargetLowering::DAGCombinerInfo &DCI)
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
void setIsDead(bool Val=true)
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getScalarValueSizeInBits() const
static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, unsigned SplatBitSize, SelectionDAG &DAG, const SDLoc &dl, EVT &VT, bool is128Bits, VMOVModImmType type)
isVMOVModifiedImm - Check if the specified splat value corresponds to a valid vector constant for a N...
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
ThreeOps_match< V1_t, V2_t, Mask_t, Instruction::ShuffleVector > m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m)
Matches ShuffleVectorInst.
An instruction for reading from memory.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1...
bool isThumb1Only() const
static IntegerType * getInt64Ty(LLVMContext &C)
bool hasExternalWeakLinkage() const
bool isTargetMuslAEABI() const
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
const SDNodeFlags getFlags() const
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
SDNode * getNode() const
get the SDNode which holds the desired result
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isVectorTy() const
True if this is an instance of VectorType.
bool hasAcquireRelease() const
bool alignLoopsWithOptSize() const override
Should loops be aligned even when the function is marked OptSize (but not MinSize).
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Global Offset Table, PC Relative.
Synchronized with respect to signal handlers executing in the same thread.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
static IntegerType * getInt16Ty(LLVMContext &C)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue CombineBaseUpdate(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, NEON load/store intrinsics...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
static SDValue PerformShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
PerformShiftCombine - Checks for immediate versions of vector shifts and lowers them.
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
unsigned getBitWidth() const
Get the bit width of this value.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst *> Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
bool isTargetHardFloat() const
constexpr bool isMask_32(uint32_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
static bool isThumb(const MCSubtargetInfo &STI)
unsigned createPICLabelUId()
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static Optional< bool > isBigEndian(const ArrayRef< int64_t > ByteOffsets, int64_t FirstOffset)
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
static bool allUsersAreInFunction(const Value *V, const Function *F)
Return true if all users of V are within function F, looking through ConstantExprs.
unsigned getBitWidth() const
Return the number of bits in the APInt.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
SDValue getExternalSymbol(const char *Sym, EVT VT)
return AArch64::GPR64RegClass contains(Reg)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
bool isTruncatingStore() const
Return true if the op does a truncation before store.
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist...
bool genExecuteOnly() const
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode *> &Visited, SmallVectorImpl< const SDNode *> &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
const ARMBaseInstrInfo * getInstrInfo() const override
bool hasV8MBaselineOps() const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Value * getArgOperand(unsigned i) const
static SDValue PerformHWLoopCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
bool match(Val *V, const Pattern &P)
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "...
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isBeforeLegalize() const
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
A description of a memory reference used in the backend.
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MO_SBREL - On a symbol operand, this represents a static base relative relocation.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
const HexagonInstrInfo * TII
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) intrinsic...
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
Shift and rotation operations.
static bool isVUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
unsigned getNumOperands() const
Retuns the total number of operands.
static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
LLVMContext & getContext() const
Get the global data context.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth...
A Use represents the edge between a Value definition and its users.
static SDValue findMUL_LOHI(SDValue V)
static bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
static bool isVZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of "vector_shuffle v...
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, int64_t Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object...
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG)
PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for ISD::VECTOR_SHUFFLE.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, bool &Negate)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
bool isIntegerTy() const
True if this is an instance of IntegerType.
op_iterator op_end() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
LLVM_NODISCARD R Default(T Value)
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
This file contains the simple types necessary to represent the attributes associated with functions a...
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getZeroVector - Returns a vector of specified type with all zero elements.
The memory access is dereferenceable (i.e., doesn't trap).
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
uint64_t decodeVMOVModImm(unsigned ModImm, unsigned &EltBits)
decodeVMOVModImm - Decode a NEON/MVE modified immediate value into the element value and the element ...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
static bool isSingletonVEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
const DataLayout & getDataLayout() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
isZeroExtended - Check if a node is a vector value that is zero-extended or a constant BUILD_VECTOR w...
static ShiftOpc getShiftOpcForNode(unsigned Opcode)
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
uint64_t getNumElements() const
For scalable vectors, this will return the minimum number of elements in the vector.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
LocInfo getLocInfo() const
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
static StructType * get(LLVMContext &Context, ArrayRef< Type *> Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
This file implements a class to represent arbitrary precision integral constant values and operations...
void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters, appending the result fragments to the output list.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2)
Return the store opcode for a given store size.
static mvt_range integer_fixedlen_vector_valuetypes()
unsigned getArgRegsSaveSize() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
This class is used to represent an MSTORE node.
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
STACKSAVE - STACKSAVE has one operand, an input chain.
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
static const unsigned PerfectShuffleTable[6561+1]
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
unsigned getActiveBits() const
Compute the number of active bits in the value.
int64_t getSExtValue() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only...
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
static bool isIdentityMask(ArrayRef< int > Mask)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
int64_t getSExtValue() const
Get sign extended value.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
Constant * createSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
Type * getType() const
All values are typed, get the type of this value.
MachineFunction & getMachineFunction() const
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here...
static bool isZeroVector(SDValue N)
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
SDValue getRegisterMask(const uint32_t *RegMask)
static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, unsigned Align, bool isSEXTLoad, bool IsMasked, bool isLE, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, const GlobalValue *GV, SelectionDAG &DAG, EVT PtrVT, const SDLoc &dl)
unsigned createVMOVModImm(unsigned OpCmode, unsigned Val)
const TargetMachine & getTarget() const
BasicBlock * GetInsertBlock() const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
static bool isSRL16(const SDValue &Op)
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
static bool isStore(int Opcode)
static SDValue PerformVCMPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
SmallVector< ISD::OutputArg, 32 > Outs
bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand ABS nodes.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool useNEONForSinglePrecisionFP() const
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_NODISCARD size_t size() const
size - Get the string size.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
CallLoweringInfo & setZExtResult(bool Value=true)
bool isTargetDarwin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
bool hasV8_1MMainlineOps() const
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
void setIsSplitCSR(bool s)
An instruction for storing to memory.
static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
void setReg(Register Reg)
Change the register this operand corresponds to.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
op_iterator op_begin() const
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask)
static EVT getVectorTyFromPredicateVector(EVT VT)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
static Function * getFunction(Constant *C)
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool isStrongerThanMonotonic(AtomicOrdering ao)
static const MCPhysReg GPRArgRegs[]
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
static SDValue PerformAddeSubeCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG)
static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
MVT getVectorElementType() const
std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type cast(const Y &Val)
Value * getOperand(unsigned i) const
Analysis containing CSE Info
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
static SDValue FindBFIToCombineWith(SDNode *N)
Class to represent pointers.
unsigned getByValSize() const
UNDEF - An undefined node.
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
bool isReadOnly(const GlobalValue *GV) const
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG)
PerformVMOVDRRCombine - Target-specific dag combine xforms for ARMISD::VMOVDRR.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
int getPromotedConstpoolIncrease() const
bool hasDivideInARMMode() const
AddrOpc getAM2Op(unsigned AM2Opc)
bool isTargetWatchABI() const
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned LdSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment load operation with given size.
static EVT getExtensionTo64Bits(const EVT &OrigVT)
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool isOSWindows() const
Tests whether the OS is Windows.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
The memory access is volatile.
void setReturnRegsCount(unsigned s)
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata *> MDs)
const SDValue & getBasePtr() const
static SDValue PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
A switch()-like statement whose cases are string literals.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
bool isSimple() const
Returns true if the memory operation is neither atomic nor volatile.
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
static SDValue PerformBFICombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
bool isAllOnesValue() const
Determine if all bits are set.
Control flow instructions. These all have token chains.
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
bool preferISHSTBarriers() const
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
int getVarArgsFrameIndex() const
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
bool useSoftFloat() const override
constexpr bool isUInt< 8 >(uint64_t x)
unsigned const MachineRegisterInfo * MRI
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
isSignExtended - Check if a node is a vector value that is sign-extended or a constant BUILD_VECTOR w...
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned countPopulation() const
Count the number of bits set.
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double, and whose elements are just simple data values (i.e.
bool isExpandingLoad() const
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) can replace combinations of ...
Value * getCalledValue() const
Value * concatenateVectors(IRBuilder<> &Builder, ArrayRef< Value *> Vecs)
Concatenate a list of vectors.
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
LLVM Basic Block Representation.
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, TargetLowering::DAGCombinerInfo &DCI, bool AllOnes=false)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isOptionalDef() const
Set if this operand is a optional def.
The instances of the Type class are immutable: once they are created, they are never changed...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass...
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
KnownBits zext(unsigned BitWidth, bool ExtendedBitsAreKnownZero) const
Extends the underlying known Zero and One bits.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
size_t size() const
size - Get the array size.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
void resetAll()
Resets the known state of all bits.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
bool isFloatingPointOperation() const
const SDValue & getOperand(unsigned Num) const
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG)
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSHL16(const SDValue &Op)
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
bool isPointerTy() const
True if this is an instance of PointerType.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
static cl::opt< unsigned > ConstpoolPromotionMaxSize("arm-promote-constant-max-size", cl::Hidden, cl::desc("Maximum size of constant to promote into a constant pool"), cl::init(64))
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, SelectionDAG &DAG)
bool isTargetWatchOS() const
ConstantFP - Floating Point Values [float, double].
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
static bool isReverseMask(ArrayRef< int > M, EVT VT)
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isAcquireOrStronger(AtomicOrdering ao)
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isZeroOrAllOnes(SDValue N, bool AllOnes)
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void AddToWorklist(SDNode *N)
const SDValue & getOffset() const
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
static mvt_range fp_valuetypes()
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC)
IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B)
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount though its operand...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, SelectionDAG &DAG)
static bool isSRA16(const SDValue &Op)
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool CombineTo(SDValue O, SDValue N)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, CastClass_match< OpTy, Instruction::SExt > > m_ZExtOrSExt(const OpTy &Op)
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, SDValue &SatK)
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
void computeMaxCallFrameSize(const MachineFunction &MF)
Computes the maximum size of a callframe and the AdjustsStack property.
TRAP - Trapping instruction.
const APInt & getAPIntValue() const
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
const Triple & getTargetTriple() const
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, SDValue &RetVal1, SDValue &RetVal2)
Value * getPointerOperand()
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
DEBUGTRAP - Trap intended to get the attention of a debugger.
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM...
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
static cl::opt< bool > EnableConstpoolPromotion("arm-promote-constant", cl::Hidden, cl::desc("Enable / disable promotion of unnamed_addr constants into " "constant pools"), cl::init(false))
self_iterator getIterator()
unsigned getOriginalAlignment() const
Returns alignment and volatility of the memory access.
The memory access is non-temporal.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
const SDValue & get() const
If implicit conversion to SDValue doesn't work, the get() method returns the SDValue.
Class to represent integer types.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
bool allowsUnalignedMem() const
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static void ReplaceCMP_SWAP_64Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
const ARMSubtarget * getSubtarget() const
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
const MachineInstrBuilder & addFrameIndex(int Idx) const
unsigned getInRegsParamsProcessed() const
bool isThumb1OnlyFunction() const
Bit counting operators with an undefined result for zero inputs.
static SDValue PerformVDUPLANECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPLANECombine - Target-specific dag combine xforms for ARMISD::VDUPLANE. ...
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SmallPtrSet< const GlobalVariable *, 2 > & getGlobalsPromotedToConstantPool()
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
succ_iterator succ_begin()
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
std::vector< ArgListEntry > ArgListTy
static SDValue PerformORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformORCombine - Target-specific dag combine xforms for ISD::OR.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getAlignment() const
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
static SDValue PerformORCombineToSMULWBT(SDNode *OR, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool isPositionIndependent() const
This structure contains all information that is necessary for lowering calls.
static bool isLTorLE(ISD::CondCode CC)
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, SelectionDAG &DAG)
unsigned getNumOperands() const
Return the number of values used by this operation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
const std::string & getConstraintString() const
This struct is a compact representation of a valid (non-zero power of two) alignment.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool isLegalInterleavedAccessType(unsigned Factor, VectorType *VecTy, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
Triple - Helper class for working with autoconf configuration names.
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is an EXTLOAD.
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero...
The memory access writes data.
static const int BlockSize
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isReleaseOrStronger(AtomicOrdering ao)
bool use_empty() const
Return true if there are no uses of this node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
TokenFactor - This node takes multiple tokens as input and produces a single token result...
void dump() const
Dump this node, for debugging.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned TargetFlags=0)
Iterator for intrusive lists based on ilist_node.
void setPromotedConstpoolIncrease(int Sz)
CCState - This class holds information needed while lowering arguments and return values...
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
BlockVerifier::State From
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
Align max(MaybeAlign Lhs, Align Rhs)
bool useSoftFloat() const
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
bool hasFPARMv8Base() const
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isTargetAEABI() const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment...
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
const SDValue & getMask() const
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
static uint64_t add(uint64_t LeftOp, uint64_t RightOp)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
MachineOperand class - Representation of each machine instruction operand.
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
Module.h This file contains the declarations for the Module class.
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total value size to 64 bits...
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
bool hasMVEFloatOps() const
bool isFPBrccSlow() const
const InstrItineraryData * getInstrItineraryData() const override
getInstrItins - Return the instruction itineraries based on subtarget selection.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
Provides information about what library functions are available for the current target.
void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isTargetAndroid() const
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
bool hasMPExtension() const
BRCOND - Conditional branch.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const SDValue & getPassThru() const
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Byte Swap and Counting operators.
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
LLVM_NODISCARD T pop_back_val()
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
This is an abstract virtual class for memory operations.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
const Constant * getConstVal() const
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
bool isCalledByLegalizer() const
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
CallLoweringInfo & setSExtResult(bool Value=true)
unsigned getAM2Offset(unsigned AM2Opc)
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override
Return true if SHIFT instructions should be expanded to SHIFT_PARTS instructions, and false if a libr...
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T, AttributeList AttributeList)
Look up the specified function in the module symbol table.
Represents one node in the SelectionDAG.
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, bool &swpCmpOps, bool &swpVselOps)
void setAdjustsStack(bool V)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
KnownBits sext(unsigned BitWidth) const
Sign extends the underlying known Zero and One bits.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
unsigned logBase2() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
The access may modify the value stored in memory.
static bool isS16(const SDValue &Op, SelectionDAG &DAG)
MachinePointerInfo getWithOffset(int64_t O) const
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned getVectorNumElements() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
static SDValue PerformABSCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
size_t use_size() const
Return the number of uses of this node.
unsigned getPreferredAlignment(const GlobalVariable *GV) const
Returns the preferred alignment of the specified global.
static unsigned isNEONTwoResultShuffleMask(ArrayRef< int > ShuffleMask, EVT VT, unsigned &WhichResult, bool &isV_UNDEF)
Check if ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), and return the corresponding AR...
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
Class to represent vector types.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformSTORECombine - Target-specific dag combine xforms for ISD::STORE.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT...
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
EVT getMemoryVT() const
Return the type of the in-memory value.
Target - Wrapper for Target specific information.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Class for arbitrary precision integers.
unsigned getByValAlign() const
CodeModel::Model getCodeModel() const
Returns the code model.
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG)
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
iterator_range< use_iterator > uses()
amdgpu Simplify well known AMD library false FunctionCallee Callee
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
BBTy * getParent() const
Get the basic block containing the call site.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static use_iterator use_end()
Align getABIAlignmentForCallingConv(Type *ArgTy, DataLayout DL) const override
Return the correct alignment for the current calling convention.
typename SuperClass::iterator iterator
iterator_range< user_iterator > users()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
static SDValue PerformAddcSubcCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ANY_EXTEND - Used for integer types. The high bits are undefined.
bool genLongCalls() const
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
int getMaskElt(unsigned Idx) const
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
ElementCount getVectorElementCount() const
void setArgumentStackSize(unsigned size)
static SDValue PerformXORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool hasVMLxForwarding() const
static bool isVTBLMask(ArrayRef< int > M, EVT VT)
bool isEmpty() const
Returns true if there are no itineraries.
Flags
Flags values. These may be or'd together.
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
const MachineBasicBlock * getParent() const
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) can replace combinations of ...
bool isUnknown() const
Returns true if we don't know any bits.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Section Relative (Windows TLS)
BR_JT - Jumptable branch.
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, SelectionDAG &DAG)
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
const Triple & getTargetTriple() const
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static CondCodes getOppositeCondition(CondCodes CC)
Represents a use of a SDNode.
static bool canChangeToInt(SDValue Op, bool &SeenZero, const ARMSubtarget *Subtarget)
canChangeToInt - Given the fp compare operand, return true if it is suitable to morph to an integer c...
bool hasRetAddrStack() const
void setVarArgsFrameIndex(int Index)
SmallVector< SDValue, 32 > OutVals
bool is64BitVector() const
Return true if this is a 64-bit vector type.
static SDValue PerformVMOVRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool LowerToByteSwap(CallInst *CI)
Try to replace a call instruction with a call to a bswap intrinsic.
unsigned getNumArgOperands() const
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
bool isVector() const
Return true if this is a vector value type.
const uint32_t * getSjLjDispatchPreservedMask(const MachineFunction &MF) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const unsigned char * bytes_begin() const
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Bitwise operators - logical and, logical or, logical xor.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2, return the log base 2 integer value.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
unsigned getAlignment() const
Return the alignment of the access that is being performed.
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
unsigned getLocMemOffset() const
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
static bool isVUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of "vector_shuffle v...
bool isEHPad() const
Returns true if the block is a landing pad.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
LLVM_NODISCARD bool empty() const
bool is128BitVector() const
Return true if this is a 128-bit vector type.
StringRef getValueAsString() const
Return the attribute's value as a string.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, MachineInstr &MI, const SDNode *Node)
Attaches vregs to MEMCPY that it will use as scratch registers when it is expanded into LDM/STM...
static SDValue LowerInterruptReturn(SmallVectorImpl< SDValue > &RetOps, const SDLoc &DL, SelectionDAG &DAG)
static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG)
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
static bool isLegalT2AddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
const Function * getParent() const
Return the enclosing method, or null if none.
static bool isLegalAddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
isLegalAddressImmediate - Return true if the integer value can be used as the offset of the target ad...
void setArgRegsSaveSize(unsigned s)
static MachineOperand CreateImm(int64_t Val)
static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
const TargetSubtargetInfo & getSubtarget() const
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned StSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment store operation with given size.
Flags getFlags() const
Return the raw flags of the source value,.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
const ARMBaseRegisterInfo * getRegisterInfo() const override
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, ARMCC::CondCodes &CondCode2)
FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
The memory access always returns the same value (or traps).
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
static cl::opt< unsigned > ConstpoolPromotionMaxTotal("arm-promote-constant-max-total", cl::Hidden, cl::desc("Maximum size of ALL constants to promote into a constant pool"), cl::init(128))
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Rename collisions when linking (static functions).
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
const SDValue & getBasePtr() const
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
static const int LAST_INDEXED_MODE
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
SDValue getValue(unsigned R) const
unsigned getInRegsParamsCount() const
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
constexpr bool isUInt< 16 >(uint64_t x)
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
unsigned getAlignment() const
Return the alignment of the access that is being performed FIXME: Remove this function once transitio...
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
LLVM_NODISCARD bool empty() const
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const override
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCondCode(ISD::CondCode Cond)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
unsigned getPrefLoopLogAlignment() const
static bool isGTorGE(ISD::CondCode CC)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isTargetMachO() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
const MachinePointerInfo & getPointerInfo() const
static SDValue PerformVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
MachineConstantPoolValue * getMachineCPVal() const
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static SDValue PerformORCombineToBFI(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool hasFnAttribute(Attribute::AttrKind Kind) const
Equivalent to hasAttribute(AttributeList::FunctionIndex, Kind) but may be faster. ...
static RTLIB::Libcall getDivRemLibcall(const SDNode *N, MVT::SimpleValueType SVT)
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool hasAtomicStore() const
Return true if this atomic instruction stores to memory.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
static bool isVTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of "vector_shuffle v...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Special calling convention on Windows for calling the Control Guard Check ICall function.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC, bool &Invert, SDValue &OtherOp, SelectionDAG &DAG)
void insert(iterator MBBI, MachineBasicBlock *MBB)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
This class is used to represent an MLOAD node.
void setReturnAddressIsTaken(bool s)
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
void getInRegsParamInfo(unsigned InRegsParamRecordIndex, unsigned &BeginReg, unsigned &EndReg) const
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
TypeSize getScalarSizeInBits() const
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
CallLoweringInfo & setInRegister(bool Value=true)
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
ArrayRef< int > getMask() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
Integer reductions may have a result type larger than the vector element type.
static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG)
SDValue getRegister(unsigned Reg, EVT VT)
bool supportsTailCall() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
Fast - This calling convention attempts to make calls as fast as possible (e.g.
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2)
Return the load opcode for a given load size.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
void push_back(MachineBasicBlock *MBB)
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
SDValue getValueType(EVT)
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
const MCOperandInfo * OpInfo
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
PREFETCH - This corresponds to a prefetch intrinsic.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which...
bool hasMVEIntegerOps() const
bool isUndef() const
Return true if the type of the node type undefined.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target...
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static SDValue PerformSHLSimplify(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
void rewindByValRegsInfo()
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone...
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc, or post-dec.
bool isTargetWindows() const
Primary interface to the complete machine description for the target machine.
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock *> &DestBBs)
createJumpTableIndex - Create a new jump table.
static mvt_range fixedlen_vector_valuetypes()
Type * getElementType() const
const APFloat & getValueAPF() const
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
constexpr bool isShiftedMask_32(uint32_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (32 bit ver...
ISD::LoadExtType getExtensionType() const
StringRef - Represent a constant reference to a string, i.e.
static SDValue PerformORCombine_i1(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
SetCC operator - This evaluates to a true value iff the condition is true.
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
bool isOSVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isOSVersionLT - Helper function for doing comparisons against version numbers included in the target ...
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
Type * getArrayElementType() const
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
static BranchProbability getZero()
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
bool operator==(uint64_t V1, const APInt &V2)
unsigned getNumOperands() const
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
bool hasAnyDataBarrier() const
Register getReg() const
getReg - Returns the register number.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
static bool isVolatile(Instruction *Inst)
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
uint64_t getZExtValue() const
TRUNCATE - Completely drop the high bits.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
bool isBitFieldInvertedMask(unsigned v)
static SDValue CombineANDShift(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
bool hasDataBarrier() const
const MachineOperand & getOperand(unsigned i) const
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand *> NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
OutputIt copy(R &&Range, OutputIt Out)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
ExpandBITCAST - If the target supports VFP, this function is called to expand a bit convert where eit...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
static SDValue AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
Perform various unary floating-point operations inspired by libm.
VectorType * getType() const
Overload to return most specific vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
static bool isSaturatingConditional(const SDValue &Op, SDValue &V, uint64_t &K, bool &usat)
Value * getPointerOperand()
Function Alias Analysis false
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
static IntegerType * getInt8Ty(LLVMContext &C)
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
The operation is expected to be selectable directly by the target, and no transformation is necessary...
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
LLVMContext * getContext() const
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static bool isSplatMask(const int *Mask, EVT VT)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one...
static bool isVEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseVEXT, unsigned &Imm)
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
Type * getElementType() const
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::vector< MachineBasicBlock * >::iterator succ_iterator
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG)
Carry-using nodes for multiple precision addition and subtraction.
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
Wrapper class representing virtual and physical registers.
bool empty() const
empty - Check if the array is empty.
bool isArrayTy() const
True if this is an instance of ArrayType.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
This file describes how to lower LLVM code to machine code.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
FloatABI::ABIType FloatABIType
FloatABIType - This setting is set when the -float-abi=xxx option is specified on the command line...
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use *> &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
static TargetLowering::ArgListTy getDivRemArgList(const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget)
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
This class is used to represent ISD::LOAD nodes.
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...